comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Is there a way to make `1024` configurable? This is a pretty small number for average application. | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | .getValue(); | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file not existed");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file does not exist");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} |
@rickle-msft @ibrahimrabab Should we introduce `shareFileClient.createIfNotExists(size)` ? | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | if (!this.exists()) { | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file not existed");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file does not exist");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} |
Thanks @kasobol-msft, we really should not use this `1024` value here. But in a `Resource` implementation, it's not reasonable for users to define a `maximum size` for a file (if the file does not exist). The users locate a resource using `azure-file://some-share/some-file`. So we have to figure out how to make it configurable. How about implementing this feature request in another PR? | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | .getValue(); | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file not existed");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file does not exist");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} |
sounds good. please create GH issue to track. | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | .getValue(); | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file not existed");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file does not exist");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} |
https://github.com/Azure/azure-sdk-for-java/issues/30120 | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | .getValue(); | private void create() throws ShareStorageException {
if (!this.exists()) {
ShareFileHttpHeaders header = null;
if (StringUtils.hasText(contentType)) {
header = new ShareFileHttpHeaders();
header.setContentType(contentType);
}
this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE)
.getValue();
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file not existed");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} | class StorageFileResource extends AzureStorageResource {
private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file";
private final ShareServiceClient shareServiceClient;
private final ShareClient shareClient;
private final ShareFileClient shareFileClient;
private final String location;
private final boolean autoCreateFiles;
private final String contentType;
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location) {
this(shareServiceClient, location, false);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) {
this(shareServiceClient, location, autoCreateFiles, null);
}
/**
* Creates a new instance of {@link StorageFileResource}.
*
* @param shareServiceClient the ShareServiceClient
* @param location the location
* @param autoCreateFiles whether to automatically create files
* @param contentType the content type
*/
public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles,
String contentType) {
assertIsAzureStorageLocation(location);
this.autoCreateFiles = autoCreateFiles;
this.location = location;
this.shareServiceClient = shareServiceClient;
this.shareClient = shareServiceClient.getShareClient(getContainerName(location));
this.shareFileClient = shareClient.getFileClient(getFilename(location));
this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location);
}
/**
* Checks whether an Azure Storage File can be opened,
* if the file is not existed, and autoCreateFiles==true,
* it will create the file on Azure Storage.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws IOException when fail to open the output stream.
*/
@Override
public OutputStream getOutputStream() throws IOException {
try {
if (this.autoCreateFiles) {
this.shareClient.createIfNotExists();
this.create();
}
return this.shareFileClient.getFileOutputStream();
} catch (ShareStorageException e) {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
/**
* Determines if the file this client represents exists in the cloud.
*
* @return Flag indicating existence of the file.
*/
@Override
public boolean exists() {
return this.shareFileClient.exists();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
@Override
public URL getURL() throws IOException {
return new URL(this.shareFileClient.getFileUrl());
}
/**
* This implementation throws a FileNotFoundException, assuming
* that the resource cannot be resolved to an absolute file path.
*/
@Override
public File getFile() {
throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path");
}
/**
* @return The number of bytes present in the response body.
*/
@Override
public long contentLength() {
return this.shareFileClient.getProperties().getContentLength();
}
/**
*
* @return Last time the directory was modified.
*/
@Override
public long lastModified() {
return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000;
}
/**
* Create relative resource from current location.
*
* @param relativePath the relative path.
* @return StorageFileResource with relative path from current location.
*/
@Override
public Resource createRelative(String relativePath) {
String newLocation = this.location + "/" + relativePath;
return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles);
}
/**
* @return The name of the file.
*/
@Override
public String getFilename() {
final String[] split = this.shareFileClient.getFilePath().split("/");
return split[split.length - 1];
}
/**
* @return a description for this resource,
* to be used for error output when working with the resource.
*/
@Override
public String getDescription() {
return String.format("Azure storage account file resource [container='%s', file='%s']",
this.shareFileClient.getShareName(), this.getFilename());
}
@Override
public InputStream getInputStream() throws IOException {
try {
return this.shareFileClient.openInputStream();
} catch (ShareStorageException e) {
if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND
|| e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) {
throw new FileNotFoundException("Share or file does not exist");
} else {
throw new IOException(MSG_FAIL_OPEN_OUTPUT, e);
}
}
}
@Override
StorageType getStorageType() {
return StorageType.FILE;
}
} |
indent | public void userEventTriggered(final ChannelHandlerContext context, final Object event) {
this.traceOperation(context, "userEventTriggered", event);
try {
if (event instanceof IdleStateEvent) {
if (this.healthChecker instanceof RntbdClientChannelHealthChecker) {
((RntbdClientChannelHealthChecker) this.healthChecker)
.isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> {
final Throwable cause;
if (future.isSuccess()) {
if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) {
return;
}
cause = new UnhealthyChannelException(future.get());
} else {
cause = future.cause();
}
this.exceptionCaught(context, cause);
});
} else {
this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> {
final Throwable cause;
if (future.isSuccess()) {
if (future.get()) {
return;
}
cause = new UnhealthyChannelException(
MessageFormat.format(
"Custom ChannelHealthChecker {0} failed.",
this.healthChecker.getClass().getSimpleName()));
} else {
cause = future.cause();
}
this.exceptionCaught(context, cause);
});
}
return;
}
if (event instanceof RntbdContext) {
this.contextFuture.complete((RntbdContext) event);
this.removeContextNegotiatorAndFlushPendingWrites(context);
return;
}
if (event instanceof RntbdContextException) {
this.contextFuture.completeExceptionally((RntbdContextException) event);
context.pipeline().flush().close();
return;
}
if (event instanceof SslHandshakeCompletionEvent) {
SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event;
if (!sslHandshakeCompletionEvent.isSuccess()) {
if (logger.isDebugEnabled()) {
logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause());
}
this.exceptionCaught(context, sslHandshakeCompletionEvent.cause());
return;
} else {
logger.info("adding idleStateHandler");
context.pipeline().addFirst(
new IdleStateHandler(
this.idleConnectionTimerResolutionInNanos,
this.idleConnectionTimerResolutionInNanos,
0,
TimeUnit.NANOSECONDS));
}
}
context.fireUserEventTriggered(event);
} catch (Throwable error) {
reportIssue(context, "{}: ", event, error);
this.exceptionCaught(context, error);
}
} | new IdleStateHandler( | public void userEventTriggered(final ChannelHandlerContext context, final Object event) {
this.traceOperation(context, "userEventTriggered", event);
try {
if (event instanceof IdleStateEvent) {
if (this.healthChecker instanceof RntbdClientChannelHealthChecker) {
((RntbdClientChannelHealthChecker) this.healthChecker)
.isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> {
final Throwable cause;
if (future.isSuccess()) {
if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) {
return;
}
cause = new UnhealthyChannelException(future.get());
} else {
cause = future.cause();
}
this.exceptionCaught(context, cause);
});
} else {
this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> {
final Throwable cause;
if (future.isSuccess()) {
if (future.get()) {
return;
}
cause = new UnhealthyChannelException(
MessageFormat.format(
"Custom ChannelHealthChecker {0} failed.",
this.healthChecker.getClass().getSimpleName()));
} else {
cause = future.cause();
}
this.exceptionCaught(context, cause);
});
}
return;
}
if (event instanceof RntbdContext) {
this.contextFuture.complete((RntbdContext) event);
this.removeContextNegotiatorAndFlushPendingWrites(context);
return;
}
if (event instanceof RntbdContextException) {
this.contextFuture.completeExceptionally((RntbdContextException) event);
this.exceptionCaught(context, (RntbdContextException)event);
return;
}
if (event instanceof SslHandshakeCompletionEvent) {
SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event;
if (sslHandshakeCompletionEvent.isSuccess()) {
if (logger.isDebugEnabled()) {
logger.debug("SslHandshake completed, adding idleStateHandler");
}
context.pipeline().addAfter(
SslHandler.class.toString(),
IdleStateHandler.class.toString(),
new IdleStateHandler(
this.idleConnectionTimerResolutionInNanos,
this.idleConnectionTimerResolutionInNanos,
0,
TimeUnit.NANOSECONDS));
} else {
if (logger.isDebugEnabled()) {
logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause());
}
this.exceptionCaught(context, sslHandshakeCompletionEvent.cause());
return;
}
}
context.fireUserEventTriggered(event);
} catch (Throwable error) {
reportIssue(context, "{}: ", event, error);
this.exceptionCaught(context, error);
}
} | class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler {
private static final ClosedChannelException ON_CHANNEL_UNREGISTERED =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered");
private static final ClosedChannelException ON_CLOSE =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close");
private static final ClosedChannelException ON_DEREGISTER =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister");
private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory(
"request-expirator",
true,
Thread.NORM_PRIORITY));
private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class);
private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>();
private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>();
private final ChannelHealthChecker healthChecker;
private final int pendingRequestLimit;
private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests;
private final Timestamps timestamps = new Timestamps();
private final RntbdConnectionStateListener rntbdConnectionStateListener;
private final long idleConnectionTimerResolutionInNanos;
private boolean closingExceptionally = false;
private CoalescingBufferQueue pendingWrites;
public RntbdRequestManager(
final ChannelHealthChecker healthChecker,
final int pendingRequestLimit,
final RntbdConnectionStateListener connectionStateListener,
final long idleConnectionTimerResolutionInNanos) {
checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit);
checkNotNull(healthChecker, "healthChecker");
this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit);
this.pendingRequestLimit = pendingRequestLimit;
this.healthChecker = healthChecker;
this.rntbdConnectionStateListener = connectionStateListener;
this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos;
}
/**
* Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void handlerAdded(final ChannelHandlerContext context) {
this.traceOperation(context, "handlerAdded");
}
/**
* Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events
* anymore.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void handlerRemoved(final ChannelHandlerContext context) {
this.traceOperation(context, "handlerRemoved");
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} is now active
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelActive(final ChannelHandlerContext context) {
this.traceOperation(context, "channelActive");
context.fireChannelActive();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime
* <p>
* This method will only be called after the channel is closed.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelInactive(final ChannelHandlerContext context) {
this.traceOperation(context, "channelInactive");
context.fireChannelInactive();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs.
* @param message The message read.
*/
@Override
public void channelRead(final ChannelHandlerContext context, final Object message) {
this.traceOperation(context, "channelRead");
try {
if (message.getClass() == RntbdResponse.class) {
try {
this.messageReceived(context, (RntbdResponse) message);
} catch (CorruptedFrameException error) {
this.exceptionCaught(context, error);
} catch (Throwable throwable) {
reportIssue(context, "{} ", message, throwable);
this.exceptionCaught(context, throwable);
}
} else {
final IllegalStateException error = new IllegalStateException(
lenientFormat("expected message of %s, not %s: %s",
RntbdResponse.class,
message.getClass(),
message));
reportIssue(context, "", error);
this.exceptionCaught(context, error);
}
} finally {
if (message instanceof ReferenceCounted) {
boolean released = ((ReferenceCounted) message).release();
reportIssueUnless(released, context, "failed to release message: {}", message);
}
}
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read.
* <p>
* If {@link ChannelOption
* {@link Channel} will be made until {@link ChannelHandlerContext
* for outbound messages to be written.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelReadComplete(final ChannelHandlerContext context) {
this.traceOperation(context, "channelReadComplete");
this.timestamps.channelReadCompleted();
context.fireChannelReadComplete();
}
/**
* Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest}
* <p>
* This method then calls {@link ChannelHandlerContext
* {@link ChannelInboundHandler} in the {@link ChannelPipeline}.
* <p>
* Sub-classes may override this method to change behavior.
*
* @param context the {@link ChannelHandlerContext} for which the bind operation is made
*/
@Override
public void channelRegistered(final ChannelHandlerContext context) {
this.traceOperation(context, "channelRegistered");
reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites);
this.pendingWrites = new CoalescingBufferQueue(context.channel());
context.fireChannelRegistered();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop}
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelUnregistered(final ChannelHandlerContext context) {
this.traceOperation(context, "channelUnregistered");
if (!this.closingExceptionally) {
this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED);
} else {
logger.debug("{} channelUnregistered exceptionally", context);
}
context.fireChannelUnregistered();
}
/**
 * Gets called once the writable state of a {@link Channel} changed.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 */
@Override
public void channelWritabilityChanged(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelWritabilityChanged");
    // No local bookkeeping; just propagate the event down the pipeline.
    context.fireChannelWritabilityChanged();
}
/**
 * Handles a {@link Throwable} raised in the pipeline by failing all pending requests and closing the channel.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 * @param cause Exception caught
 */
@Override
@SuppressWarnings("deprecation")
public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) {
    this.traceOperation(context, "exceptionCaught", cause);
    // closingExceptionally guards against re-entrant failure handling once shutdown has started.
    if (!this.closingExceptionally) {
        this.completeAllPendingRequestsExceptionally(context, cause);
        if (logger.isDebugEnabled()) {
            logger.debug("{} closing due to:", context, cause);
        }
        // Flush anything already written, then close the channel.
        context.flush().close();
    }
}
/**
* Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline
* <p>
* All but inbound request management events are ignored.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
* @param event An object representing a user event
*/
@Override
/**
 * Called once a bind operation is made.
 *
 * @param context the {@link ChannelHandlerContext} for which the bind operation is made
 * @param localAddress the {@link SocketAddress} to which it should bound
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) {
    this.traceOperation(context, "bind", localAddress);
    // Pure pass-through; no request-manager state is affected by binding.
    context.bind(localAddress, promise);
}
/**
 * Called once a close operation is made.
 *
 * @param context the {@link ChannelHandlerContext} for which the close operation is made
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
    this.traceOperation(context, "close");
    if (!this.closingExceptionally) {
        // Fail all in-flight requests before the channel goes away.
        this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
    } else {
        logger.debug("{} closed exceptionally", context);
    }
    final SslHandler sslHandler = context.pipeline().get(SslHandler.class);
    if (sslHandler != null) {
        // Attempt a clean outbound TLS shutdown. SSLExceptions here are expected during abrupt
        // teardown and are only logged; anything else is logged and rethrown.
        try {
            sslHandler.closeOutbound();
        } catch (Exception exception) {
            if (exception instanceof SSLException) {
                logger.debug(
                    "SslException when attempting to close the outbound SSL connection: ",
                    exception);
            } else {
                logger.warn(
                    "Exception when attempting to close the outbound SSL connection: ",
                    exception);
                throw exception;
            }
        }
    }
    context.close(promise);
}
/**
 * Called once a connect operation is made.
 *
 * @param context the {@link ChannelHandlerContext} for which the connect operation is made
 * @param remoteAddress the {@link SocketAddress} to which it should connect
 * @param localAddress the {@link SocketAddress} which is used as source on connect
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void connect(
    final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress,
    final ChannelPromise promise
) {
    this.traceOperation(context, "connect", remoteAddress, localAddress);
    // Pure pass-through; no request-manager state is affected by connecting.
    context.connect(remoteAddress, localAddress, promise);
}
/**
 * Called once a deregister operation is made from the current registered {@link EventLoop}.
 *
 * @param context the {@link ChannelHandlerContext} for which the deregister operation is made
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) {
    this.traceOperation(context, "deregister");
    if (!this.closingExceptionally) {
        // Fail all in-flight requests: once deregistered, no responses will be delivered.
        this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER);
    } else {
        logger.debug("{} deregistered exceptionally", context);
    }
    context.deregister(promise);
}
/**
 * Called once a disconnect operation is made.
 *
 * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) {
    this.traceOperation(context, "disconnect");
    // Pure pass-through; pending-request cleanup happens in close/deregister/channelUnregistered.
    context.disconnect(promise);
}
/**
 * Called once a flush operation is made.
 * <p>
 * The flush operation will try to flush out all previous written messages that are pending.
 *
 * @param context the {@link ChannelHandlerContext} for which the flush operation is made
 */
@Override
public void flush(final ChannelHandlerContext context) {
    this.traceOperation(context, "flush");
    context.flush();
}
/**
 * Intercepts read requests and forwards them down the pipeline.
 *
 * @param context the {@link ChannelHandlerContext} for which the read operation is made
 */
@Override
public void read(final ChannelHandlerContext context) {
    this.traceOperation(context, "read");
    context.read();
}
/**
 * Called once a write operation is made.
 * <p>
 * {@link RntbdRequestRecord} messages are registered as pending requests before being passed down the
 * {@link ChannelPipeline}; health-check ping messages are forwarded as-is. Any other message type is a
 * programming error that fails the channel.
 *
 * @param context the {@link ChannelHandlerContext} for which the write operation is made
 * @param message the message to write
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) {
    this.traceOperation(context, "write", message);
    if (message instanceof RntbdRequestRecord) {
        final RntbdRequestRecord record = (RntbdRequestRecord) message;
        this.timestamps.channelWriteAttempted();
        record.setSendingRequestHasStarted();
        // Register the record before writing so a fast response cannot race an unregistered request.
        context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> {
            record.stage(RntbdRequestRecord.Stage.SENT);
            if (completed.isSuccess()) {
                this.timestamps.channelWriteCompleted();
            }
        });
        return;
    }
    if (message == RntbdHealthCheckRequest.MESSAGE) {
        context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> {
            if (completed.isSuccess()) {
                this.timestamps.channelPingCompleted();
            }
        });
        return;
    }
    // Fixed truncated diagnostic ("message of %s: %s"): state explicitly that the type was unexpected,
    // matching the style of the channelRead diagnostic.
    final IllegalStateException error = new IllegalStateException(lenientFormat("unexpected message of %s: %s",
        message.getClass(),
        message));
    reportIssue(context, "", error);
    this.exceptionCaught(context, error);
}
// Number of in-flight requests currently awaiting responses on this channel.
int pendingRequestCount() {
    return this.pendingRequests.size();
}
/**
 * Returns the negotiated {@link RntbdContext}, if one has been received from the service.
 * <p>
 * {@code Optional.ofNullable} is required here: before RNTBD context negotiation completes,
 * {@code contextFuture.getNow(null)} yields {@code null}, and {@code Optional.of(null)} would throw
 * {@link NullPointerException} instead of producing the empty {@link Optional} that callers expect
 * (e.g. {@code rntbdContext().orElseThrow(IllegalStateException::new)} in messageReceived).
 *
 * @return an {@link Optional} holding the negotiated context, or empty when negotiation is incomplete
 */
Optional<RntbdContext> rntbdContext() {
    return Optional.ofNullable(this.contextFuture.getNow(null));
}
// Exposes the future tracking the RNTBD context request (see hasRequestedRntbdContext()).
CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() {
    return this.contextRequestFuture;
}
// True once the context-request future holds a value, i.e. the RNTBD context request has been issued.
boolean hasRequestedRntbdContext() {
    return this.contextRequestFuture.getNow(null) != null;
}
// True once the RNTBD context has been received from the service.
boolean hasRntbdContext() {
    return this.contextFuture.getNow(null) != null;
}
/**
 * Computes this channel's admission state for a caller that wants to issue {@code demand} more requests.
 *
 * @param demand the number of additional requests the caller would like to send
 * @return an {@link RntbdChannelState} describing whether this channel can accept more work
 */
RntbdChannelState getChannelState(final int demand) {
    reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued");
    final boolean contextReceived = this.hasRntbdContext();
    final int inFlightCount = this.pendingRequests.size();
    // Until the RNTBD context arrives, cap concurrency at the caller's demand as well as the configured limit.
    final int admissionLimit = contextReceived
        ? this.pendingRequestLimit
        : Math.min(this.pendingRequestLimit, demand);
    if (inFlightCount < admissionLimit) {
        return RntbdChannelState.ok(inFlightCount);
    }
    return contextReceived
        ? RntbdChannelState.pendingLimit(inFlightCount)
        : RntbdChannelState.contextNegotiationPending(inFlightCount);
}
// Buffers encoded request bytes until they can be flushed (see removeContextNegotiatorAndFlushPendingWrites).
void pendWrite(final ByteBuf out, final ChannelPromise promise) {
    this.pendingWrites.add(out, promise);
}
// Returns a defensive copy so callers observe a consistent snapshot of channel-activity timestamps.
Timestamps snapshotTimestamps() {
    return new Timestamps(this.timestamps);
}
/**
 * Registers {@code record} as a pending request keyed by its transport request ID and arms its expiration
 * timeout. The timeout is cancelled and the map entry removed when the record completes, normally or
 * exceptionally.
 *
 * @param context the {@link ChannelHandlerContext} used for issue reporting
 * @param record the request record to track
 * @return {@code record}, for chaining into the channel write
 */
private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) {
    return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> {
        // Fixed: supply all three arguments declared by the format string (previously only `record`
        // was passed, leaving two placeholders unfilled in the reported diagnostic).
        reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", id, current, record);
        record.pendingRequestQueueSize(pendingRequests.size());
        final Timeout pendingRequestTimeout = record.newTimeout(timeout -> {
            // Expire off the timer thread so expiration work cannot block other timeouts.
            requestExpirationExecutor.execute(record::expire);
        });
        record.whenComplete((response, error) -> {
            this.pendingRequests.remove(id);
            pendingRequestTimeout.cancel();
        });
        return record;
    });
}
/**
 * Fails every pending write and pending request on this channel with a {@link GoneException} derived from
 * {@code throwable}, notifying the connection-state listener and completing the context futures exceptionally
 * if they are still outstanding. Safe to call exactly once; the closingExceptionally flag makes later
 * lifecycle events no-ops.
 *
 * @param context the channel handler context being torn down
 * @param throwable the reason the channel is being torn down
 */
private void completeAllPendingRequestsExceptionally(
    final ChannelHandlerContext context, final Throwable throwable
) {
    reportIssueUnless(!this.closingExceptionally, context, "", throwable);
    this.closingExceptionally = true;
    if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) {
        // Release buffered (never-sent) writes and fail their promises.
        this.pendingWrites.releaseAndFailAll(context, throwable);
    }
    if (this.rntbdConnectionStateListener != null) {
        this.rntbdConnectionStateListener.onException(throwable);
    }
    if (this.pendingRequests.isEmpty()) {
        return;
    }
    if (!this.contextRequestFuture.isDone()) {
        this.contextRequestFuture.completeExceptionally(throwable);
    }
    if (!this.contextFuture.isDone()) {
        this.contextFuture.completeExceptionally(throwable);
    }
    final int count = this.pendingRequests.size();
    Exception contextRequestException = null;
    String phrase = null;
    // Derive a human-readable phrase (and possibly a more specific cause) from however context
    // negotiation failed: write path first, then read path, else a generic message.
    if (this.contextRequestFuture.isCompletedExceptionally()) {
        try {
            this.contextRequestFuture.get();
        } catch (final CancellationException error) {
            phrase = "RNTBD context request write cancelled";
            contextRequestException = error;
        } catch (final Exception error) {
            phrase = "RNTBD context request write failed";
            contextRequestException = error;
        } catch (final Throwable error) {
            // Non-Exception throwables (Errors) are wrapped so `cause` below stays an Exception.
            phrase = "RNTBD context request write failed";
            contextRequestException = new ChannelException(error);
        }
    } else if (this.contextFuture.isCompletedExceptionally()) {
        try {
            this.contextFuture.get();
        } catch (final CancellationException error) {
            phrase = "RNTBD context request read cancelled";
            contextRequestException = error;
        } catch (final Exception error) {
            phrase = "RNTBD context request read failed";
            contextRequestException = error;
        } catch (final Throwable error) {
            phrase = "RNTBD context request read failed";
            contextRequestException = new ChannelException(error);
        }
    } else {
        phrase = "closed exceptionally";
    }
    final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count);
    final Exception cause;
    // Prefer the context-negotiation failure over a bare ClosedChannelException as the root cause.
    if (throwable instanceof ClosedChannelException) {
        cause = contextRequestException == null
            ? (ClosedChannelException) throwable
            : contextRequestException;
    } else {
        cause = throwable instanceof Exception
            ? (Exception) throwable
            : new ChannelException(throwable);
    }
    // Fail every in-flight request with a retriable GoneException carrying its original request headers.
    for (RntbdRequestRecord record : this.pendingRequests.values()) {
        final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders();
        final String requestUri = record.args().physicalAddress().toString();
        final GoneException error = new GoneException(message, cause, null, requestUri);
        BridgeInternal.setRequestHeaders(error, requestHeaders);
        record.completeExceptionally(error);
    }
}
/**
 * This method is called for each incoming message of type {@link RntbdResponse} to complete a request.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs.
 * @param response the {@link RntbdResponse message} received.
 */
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) {
    final Long transportRequestId = response.getTransportRequestId();
    if (transportRequestId == null) {
        // A response we cannot correlate is a protocol-level issue worth reporting.
        reportIssue(context, "response ignored because its transportRequestId is missing: {}", response);
        return;
    }
    final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId);
    if (requestRecord == null) {
        // The request may have expired or been completed before its response arrived; not an error.
        logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response);
        return;
    }
    requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime());
    requestRecord.stage(
        RntbdRequestRecord.Stage.RECEIVED,
        response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now());
    requestRecord.responseLength(response.getMessageLength());
    final HttpResponseStatus status = response.getStatus();
    final UUID activityId = response.getActivityId();
    final int statusCode = status.code();
    // 2xx and 304 (NOT_MODIFIED) complete the request normally; every other status maps to a
    // CosmosException subtype in the switch below.
    if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) ||
        statusCode == HttpResponseStatus.NOT_MODIFIED.code()) {
        final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null));
        requestRecord.complete(storeResponse);
    } else {
        final CosmosException cause;
        final long lsn = response.getHeader(RntbdResponseHeader.LSN);
        final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId);
        // Prefer the error body from the service; fall back to a synthetic error built from the status line.
        final CosmosError error = response.hasPayload()
            ? new CosmosError(RntbdObjectMapper.readTree(response))
            : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name());
        final Map<String, String> responseHeaders = response.getHeaders().asMap(
            this.rntbdContext().orElseThrow(IllegalStateException::new), activityId
        );
        final String resourceAddress = requestRecord.args().physicalAddress() != null ?
            requestRecord.args().physicalAddress().toString() : null;
        switch (status.code()) {
            case StatusCodes.BADREQUEST:
                cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.CONFLICT:
                cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.FORBIDDEN:
                cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.GONE:
                // 410 is refined by sub-status: split/migration/stale-cache/range-gone each get a
                // dedicated exception type so retry policies can react appropriately.
                final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus));
                switch (subStatusCode) {
                    case SubStatusCodes.COMPLETING_SPLIT:
                        cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.COMPLETING_PARTITION_MIGRATION:
                        cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.NAME_CACHE_IS_STALE:
                        cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.PARTITION_KEY_RANGE_GONE:
                        cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    default:
                        GoneException goneExceptionFromService =
                            new GoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        // Mark that this 410 came from the service rather than being synthesized locally.
                        goneExceptionFromService.setIsBasedOn410ResponseFromService();
                        cause = goneExceptionFromService;
                        break;
                }
                break;
            case StatusCodes.INTERNAL_SERVER_ERROR:
                cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.LOCKED:
                cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.METHOD_NOT_ALLOWED:
                cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.NOTFOUND:
                cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.PRECONDITION_FAILED:
                cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.REQUEST_ENTITY_TOO_LARGE:
                cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.REQUEST_TIMEOUT:
                // Request timeouts are surfaced as GoneException (retriable) with the timeout as inner cause.
                Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders);
                cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner);
                break;
            case StatusCodes.RETRY_WITH:
                cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.SERVICE_UNAVAILABLE:
                cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.TOO_MANY_REQUESTS:
                cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.UNAUTHORIZED:
                cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            default:
                cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders);
                break;
        }
        BridgeInternal.setResourceAddress(cause, resourceAddress);
        requestRecord.completeExceptionally(cause);
    }
}
// Removes both halves of the RNTBD context negotiator from the pipeline (negotiation no longer needed)
// and flushes any writes that were buffered while negotiation was in progress.
private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) {
    final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class);
    negotiator.removeInboundHandler();
    negotiator.removeOutboundHandler();
    if (!this.pendingWrites.isEmpty()) {
        this.pendingWrites.writeAndRemoveAll(context);
        context.flush();
    }
}
// Convenience wrapper over RntbdReporter for reporting unexpected conditions against this class's logger.
private static void reportIssue(final Object subject, final String format, final Object... args) {
    RntbdReporter.reportIssue(logger, subject, format, args);
}
// Reports an issue only when {@code predicate} is false; a soft assertion that does not throw.
private static void reportIssueUnless(
    final boolean predicate, final Object subject, final String format, final Object... args
) {
    RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args);
}
// Trace-level diagnostic logging of channel operations; SLF4J renders the trailing Object[] element-wise.
private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) {
    logger.trace("{}\n{}\n{}", operationName, context, args);
}
// Signals that a channel failed its health check.
final static class UnhealthyChannelException extends ChannelException {
    UnhealthyChannelException(String reason) {
        super("health check failed, reason: " + reason);
    }
    // Stack-trace capture is suppressed: this exception marks channel state, and its throw site is
    // not interesting, so we avoid the cost of fillInStackTrace.
    @Override
    public Throwable fillInStackTrace() {
        return this;
    }
}
} | class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler {
// Pre-allocated ClosedChannelException sentinels with pre-baked stack traces; reused so that failing all
// pending requests on a lifecycle event does not pay the cost of filling in a stack trace each time.
private static final ClosedChannelException ON_CHANNEL_UNREGISTERED =
    ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered");
private static final ClosedChannelException ON_CLOSE =
    ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close");
private static final ClosedChannelException ON_DEREGISTER =
    ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister");
// Single daemon executor that runs request-expiration callbacks off the timer thread.
private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory(
    "request-expirator",
    true,
    Thread.NORM_PRIORITY));
private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class);
// Completed when the RNTBD context response is received from the service.
private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>();
// Completed when the RNTBD context request has been issued on the channel.
private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>();
private final ChannelHealthChecker healthChecker;
// Maximum number of in-flight requests admitted on this channel.
private final int pendingRequestLimit;
// In-flight requests keyed by transport request id.
private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests;
private final Timestamps timestamps = new Timestamps();
// Optional listener notified of channel failures; may be null.
private final RntbdConnectionStateListener rntbdConnectionStateListener;
private final long idleConnectionTimerResolutionInNanos;
// Set once completeAllPendingRequestsExceptionally has run; makes later lifecycle failures no-ops.
private boolean closingExceptionally = false;
// Buffers encoded request bytes written before RNTBD context negotiation completes; created on register.
private CoalescingBufferQueue pendingWrites;
/**
 * Creates a request manager for a single RNTBD channel.
 *
 * @param healthChecker health checker used for this channel; must not be null
 * @param pendingRequestLimit maximum number of in-flight requests admitted; must be positive
 * @param connectionStateListener optional listener notified of connection failures; may be null
 * @param idleConnectionTimerResolutionInNanos idle-connection timer resolution, in nanoseconds
 */
public RntbdRequestManager(
    final ChannelHealthChecker healthChecker,
    final int pendingRequestLimit,
    final RntbdConnectionStateListener connectionStateListener,
    final long idleConnectionTimerResolutionInNanos) {
    checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit);
    checkNotNull(healthChecker, "healthChecker");
    // Sized to the request limit so the map never needs to grow under normal load.
    this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit);
    this.pendingRequestLimit = pendingRequestLimit;
    this.healthChecker = healthChecker;
    this.rntbdConnectionStateListener = connectionStateListener;
    this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos;
}
/**
 * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 */
@Override
public void handlerAdded(final ChannelHandlerContext context) {
    // No setup beyond trace logging; per-channel state is created in channelRegistered.
    this.traceOperation(context, "handlerAdded");
}
/**
 * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events
 * anymore.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 */
@Override
public void handlerRemoved(final ChannelHandlerContext context) {
    // No teardown beyond trace logging; cleanup happens in close/deregister/channelUnregistered.
    this.traceOperation(context, "handlerRemoved");
}
/**
 * The {@link Channel} of the {@link ChannelHandlerContext} is now active.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 */
@Override
public void channelActive(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelActive");
    context.fireChannelActive();
}
/**
 * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime.
 * <p>
 * This method will only be called after the channel is closed.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 */
@Override
public void channelInactive(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelInactive");
    context.fireChannelInactive();
}
/**
 * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs.
 * @param message The message read.
 */
@Override
public void channelRead(final ChannelHandlerContext context, final Object message) {
    this.traceOperation(context, "channelRead");
    try {
        if (message.getClass() == RntbdResponse.class) {
            try {
                this.messageReceived(context, (RntbdResponse) message);
            } catch (CorruptedFrameException error) {
                // Decoding failure: fail the channel without the extra issue report.
                this.exceptionCaught(context, error);
            } catch (Throwable throwable) {
                reportIssue(context, "{} ", message, throwable);
                this.exceptionCaught(context, throwable);
            }
        } else {
            // Only RntbdResponse is expected at this point in the pipeline.
            final IllegalStateException error = new IllegalStateException(
                lenientFormat("expected message of %s, not %s: %s",
                    RntbdResponse.class,
                    message.getClass(),
                    message));
            reportIssue(context, "", error);
            this.exceptionCaught(context, error);
        }
    } finally {
        // Always release reference-counted messages to avoid Netty buffer leaks.
        if (message instanceof ReferenceCounted) {
            boolean released = ((ReferenceCounted) message).release();
            reportIssueUnless(released, context, "failed to release message: {}", message);
        }
    }
}
/**
 * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 */
@Override
public void channelReadComplete(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelReadComplete");
    // Record inbound activity so idle-connection/health checks observe a recent read.
    this.timestamps.channelReadCompleted();
    context.fireChannelReadComplete();
}
/**
 * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until an {@link RntbdContext} is
 * available, then forwards the event to the next {@link ChannelInboundHandler} in the {@link ChannelPipeline}.
 *
 * @param context the {@link ChannelHandlerContext} whose channel was registered
 */
@Override
public void channelRegistered(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelRegistered");
    // A channel is registered at most once per manager instance, so pendingWrites must not already exist.
    reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites);
    this.pendingWrites = new CoalescingBufferQueue(context.channel());
    context.fireChannelRegistered();
}
/**
 * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop}.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 */
@Override
public void channelUnregistered(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelUnregistered");
    if (!this.closingExceptionally) {
        // Fail all in-flight requests: this channel can no longer produce responses.
        this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED);
    } else {
        logger.debug("{} channelUnregistered exceptionally", context);
    }
    context.fireChannelUnregistered();
}
/**
 * Gets called once the writable state of a {@link Channel} changed.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 */
@Override
public void channelWritabilityChanged(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelWritabilityChanged");
    // No local bookkeeping; just propagate the event down the pipeline.
    context.fireChannelWritabilityChanged();
}
/**
 * Handles a {@link Throwable} raised in the pipeline by failing all pending requests and closing the channel.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
 * @param cause Exception caught
 */
@Override
@SuppressWarnings("deprecation")
public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) {
    this.traceOperation(context, "exceptionCaught", cause);
    // closingExceptionally guards against re-entrant failure handling once shutdown has started.
    if (!this.closingExceptionally) {
        this.completeAllPendingRequestsExceptionally(context, cause);
        if (logger.isDebugEnabled()) {
            logger.debug("{} closing due to:", context, cause);
        }
        // Flush anything already written, then close the channel.
        context.flush().close();
    }
}
/**
* Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline
* <p>
* All but inbound request management events are ignored.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
* @param event An object representing a user event
*/
@Override
/**
 * Called once a bind operation is made.
 *
 * @param context the {@link ChannelHandlerContext} for which the bind operation is made
 * @param localAddress the {@link SocketAddress} to which it should bound
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) {
    this.traceOperation(context, "bind", localAddress);
    // Pure pass-through; no request-manager state is affected by binding.
    context.bind(localAddress, promise);
}
/**
 * Called once a close operation is made.
 *
 * @param context the {@link ChannelHandlerContext} for which the close operation is made
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
    this.traceOperation(context, "close");
    if (!this.closingExceptionally) {
        // Fail all in-flight requests before the channel goes away.
        this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
    } else {
        logger.debug("{} closed exceptionally", context);
    }
    final SslHandler sslHandler = context.pipeline().get(SslHandler.class);
    if (sslHandler != null) {
        // Attempt a clean outbound TLS shutdown. SSLExceptions here are expected during abrupt
        // teardown and are only logged; anything else is logged and rethrown.
        try {
            sslHandler.closeOutbound();
        } catch (Exception exception) {
            if (exception instanceof SSLException) {
                logger.debug(
                    "SslException when attempting to close the outbound SSL connection: ",
                    exception);
            } else {
                logger.warn(
                    "Exception when attempting to close the outbound SSL connection: ",
                    exception);
                throw exception;
            }
        }
    }
    context.close(promise);
}
/**
 * Called once a connect operation is made.
 *
 * @param context the {@link ChannelHandlerContext} for which the connect operation is made
 * @param remoteAddress the {@link SocketAddress} to which it should connect
 * @param localAddress the {@link SocketAddress} which is used as source on connect
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void connect(
    final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress,
    final ChannelPromise promise
) {
    this.traceOperation(context, "connect", remoteAddress, localAddress);
    // Pure pass-through; no request-manager state is affected by connecting.
    context.connect(remoteAddress, localAddress, promise);
}
/**
 * Called once a deregister operation is made from the current registered {@link EventLoop}.
 *
 * @param context the {@link ChannelHandlerContext} for which the deregister operation is made
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) {
    this.traceOperation(context, "deregister");
    if (!this.closingExceptionally) {
        // Fail all in-flight requests: once deregistered, no responses will be delivered.
        this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER);
    } else {
        logger.debug("{} deregistered exceptionally", context);
    }
    context.deregister(promise);
}
/**
 * Called once a disconnect operation is made.
 *
 * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) {
    this.traceOperation(context, "disconnect");
    // Pure pass-through; pending-request cleanup happens in close/deregister/channelUnregistered.
    context.disconnect(promise);
}
/**
 * Called once a flush operation is made.
 * <p>
 * The flush operation will try to flush out all previous written messages that are pending.
 *
 * @param context the {@link ChannelHandlerContext} for which the flush operation is made
 */
@Override
public void flush(final ChannelHandlerContext context) {
    this.traceOperation(context, "flush");
    context.flush();
}
/**
 * Intercepts read requests and forwards them down the pipeline.
 *
 * @param context the {@link ChannelHandlerContext} for which the read operation is made
 */
@Override
public void read(final ChannelHandlerContext context) {
    this.traceOperation(context, "read");
    context.read();
}
/**
 * Called once a write operation is made.
 * <p>
 * {@link RntbdRequestRecord} messages are registered as pending requests before being passed down the
 * {@link ChannelPipeline}; health-check ping messages are forwarded as-is. Any other message type is a
 * programming error that fails the channel.
 *
 * @param context the {@link ChannelHandlerContext} for which the write operation is made
 * @param message the message to write
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
@Override
public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) {
    this.traceOperation(context, "write", message);
    if (message instanceof RntbdRequestRecord) {
        final RntbdRequestRecord record = (RntbdRequestRecord) message;
        this.timestamps.channelWriteAttempted();
        record.setSendingRequestHasStarted();
        // Register the record before writing so a fast response cannot race an unregistered request.
        context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> {
            record.stage(RntbdRequestRecord.Stage.SENT);
            if (completed.isSuccess()) {
                this.timestamps.channelWriteCompleted();
            }
        });
        return;
    }
    if (message == RntbdHealthCheckRequest.MESSAGE) {
        context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> {
            if (completed.isSuccess()) {
                this.timestamps.channelPingCompleted();
            }
        });
        return;
    }
    // Fixed truncated diagnostic ("message of %s: %s"): state explicitly that the type was unexpected,
    // matching the style of the channelRead diagnostic.
    final IllegalStateException error = new IllegalStateException(lenientFormat("unexpected message of %s: %s",
        message.getClass(),
        message));
    reportIssue(context, "", error);
    this.exceptionCaught(context, error);
}
// Number of in-flight requests currently awaiting responses on this channel.
int pendingRequestCount() {
    return this.pendingRequests.size();
}
/**
 * Returns the negotiated {@link RntbdContext}, if one has been received from the service.
 * <p>
 * {@code Optional.ofNullable} is required here: before RNTBD context negotiation completes,
 * {@code contextFuture.getNow(null)} yields {@code null}, and {@code Optional.of(null)} would throw
 * {@link NullPointerException} instead of producing the empty {@link Optional} that callers expect
 * (e.g. {@code rntbdContext().orElseThrow(IllegalStateException::new)} in messageReceived).
 *
 * @return an {@link Optional} holding the negotiated context, or empty when negotiation is incomplete
 */
Optional<RntbdContext> rntbdContext() {
    return Optional.ofNullable(this.contextFuture.getNow(null));
}
// Exposes the future tracking the RNTBD context request (see hasRequestedRntbdContext()).
CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() {
    return this.contextRequestFuture;
}
// True once the context-request future holds a value, i.e. the RNTBD context request has been issued.
boolean hasRequestedRntbdContext() {
    return this.contextRequestFuture.getNow(null) != null;
}
// True once the RNTBD context has been received from the service.
boolean hasRntbdContext() {
    return this.contextFuture.getNow(null) != null;
}
/**
 * Computes this channel's admission state for a caller that wants to issue {@code demand} more requests.
 *
 * @param demand the number of additional requests the caller would like to send
 * @return an {@link RntbdChannelState} describing whether this channel can accept more work
 */
RntbdChannelState getChannelState(final int demand) {
    reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued");
    final boolean contextReceived = this.hasRntbdContext();
    final int inFlightCount = this.pendingRequests.size();
    // Until the RNTBD context arrives, cap concurrency at the caller's demand as well as the configured limit.
    final int admissionLimit = contextReceived
        ? this.pendingRequestLimit
        : Math.min(this.pendingRequestLimit, demand);
    if (inFlightCount < admissionLimit) {
        return RntbdChannelState.ok(inFlightCount);
    }
    return contextReceived
        ? RntbdChannelState.pendingLimit(inFlightCount)
        : RntbdChannelState.contextNegotiationPending(inFlightCount);
}
// Buffers encoded request bytes until they can be flushed (see removeContextNegotiatorAndFlushPendingWrites).
void pendWrite(final ByteBuf out, final ChannelPromise promise) {
    this.pendingWrites.add(out, promise);
}
// Returns a defensive copy so callers observe a consistent snapshot of channel-activity timestamps.
Timestamps snapshotTimestamps() {
    return new Timestamps(this.timestamps);
}
/**
 * Registers {@code record} in the pending-request map keyed by its transport request id, arms
 * its expiration timeout, and wires completion to de-register the record and cancel the timeout.
 *
 * @param context the channel handler context, used for issue reporting.
 * @param record the request record to track.
 * @return {@code record}, now registered, so the caller can pass it straight to the write.
 */
private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) {
    return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> {
        // A transport request id must be unique per channel. Supply all three format arguments;
        // the previous call passed only 'record', leaving two '{}' placeholders unfilled in the
        // reported message.
        reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", id, current, record);
        record.pendingRequestQueueSize(pendingRequests.size());
        // Expiration runs on a dedicated executor so the timer thread is never blocked.
        final Timeout pendingRequestTimeout = record.newTimeout(timeout -> {
            requestExpirationExecutor.execute(record::expire);
        });
        // On completion (success or failure) remove the record and disarm its timeout.
        record.whenComplete((response, error) -> {
            this.pendingRequests.remove(id);
            pendingRequestTimeout.cancel();
        });
        return record;
    });
}
/**
 * Fails every pending write and pending request on this channel with an exception derived from
 * {@code throwable}, notifying the connection state listener along the way.
 * <p>
 * Expected to run at most once per channel; a second invocation is reported as an issue via
 * {@code reportIssueUnless} on the {@code closingExceptionally} flag.
 *
 * @param context the channel handler context being closed.
 * @param throwable the reason the channel is closing.
 */
private void completeAllPendingRequestsExceptionally(
    final ChannelHandlerContext context, final Throwable throwable
) {
    reportIssueUnless(!this.closingExceptionally, context, "", throwable);
    this.closingExceptionally = true;
    // Fail writes still buffered while RNTBD context negotiation was in flight.
    if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) {
        this.pendingWrites.releaseAndFailAll(context, throwable);
    }
    if (this.rntbdConnectionStateListener != null) {
        this.rntbdConnectionStateListener.onException(throwable);
    }
    if (this.pendingRequests.isEmpty()) {
        return;
    }
    // Settle both negotiation futures so late observers see the failure, not a hang.
    if (!this.contextRequestFuture.isDone()) {
        this.contextRequestFuture.completeExceptionally(throwable);
    }
    if (!this.contextFuture.isDone()) {
        this.contextFuture.completeExceptionally(throwable);
    }
    final int count = this.pendingRequests.size();
    Exception contextRequestException = null;
    String phrase = null;
    // Derive a human-readable phrase (and, where available, a more specific cause) from
    // whichever negotiation future failed. The Throwable catch after the Exception catch wraps
    // Errors surfaced by Future.get() into a ChannelException.
    if (this.contextRequestFuture.isCompletedExceptionally()) {
        try {
            this.contextRequestFuture.get();
        } catch (final CancellationException error) {
            phrase = "RNTBD context request write cancelled";
            contextRequestException = error;
        } catch (final Exception error) {
            phrase = "RNTBD context request write failed";
            contextRequestException = error;
        } catch (final Throwable error) {
            phrase = "RNTBD context request write failed";
            contextRequestException = new ChannelException(error);
        }
    } else if (this.contextFuture.isCompletedExceptionally()) {
        try {
            this.contextFuture.get();
        } catch (final CancellationException error) {
            phrase = "RNTBD context request read cancelled";
            contextRequestException = error;
        } catch (final Exception error) {
            phrase = "RNTBD context request read failed";
            contextRequestException = error;
        } catch (final Throwable error) {
            phrase = "RNTBD context request read failed";
            contextRequestException = new ChannelException(error);
        }
    } else {
        phrase = "closed exceptionally";
    }
    final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count);
    final Exception cause;
    if (throwable instanceof ClosedChannelException) {
        // Prefer the negotiation failure over a bare ClosedChannelException when one exists.
        cause = contextRequestException == null
            ? (ClosedChannelException) throwable
            : contextRequestException;
    } else {
        cause = throwable instanceof Exception
            ? (Exception) throwable
            : new ChannelException(throwable);
    }
    // Fail every pending request with a GoneException carrying the original request headers.
    for (RntbdRequestRecord record : this.pendingRequests.values()) {
        final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders();
        final String requestUri = record.args().physicalAddress().toString();
        final GoneException error = new GoneException(message, cause, null, requestUri);
        BridgeInternal.setRequestHeaders(error, requestHeaders);
        record.completeExceptionally(error);
    }
}
/**
 * This method is called for each incoming message of type {@link RntbdResponse} to complete a request.
 *
 * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs.
 * @param response the {@link RntbdResponse message} received.
 */
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) {
    final Long transportRequestId = response.getTransportRequestId();
    if (transportRequestId == null) {
        // Without a transport request id the response cannot be correlated to a pending request.
        reportIssue(context, "response ignored because its transportRequestId is missing: {}", response);
        return;
    }
    final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId);
    if (requestRecord == null) {
        // The request may already have expired or completed; drop the late response.
        logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response);
        return;
    }
    // Record decode timings and the wire length of the response for diagnostics.
    requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime());
    requestRecord.stage(
        RntbdRequestRecord.Stage.RECEIVED,
        response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now());
    requestRecord.responseLength(response.getMessageLength());
    final HttpResponseStatus status = response.getStatus();
    final UUID activityId = response.getActivityId();
    final int statusCode = status.code();
    // 2xx and 304 complete the request normally; every other status maps to a CosmosException subtype.
    if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) ||
        statusCode == HttpResponseStatus.NOT_MODIFIED.code()) {
        final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null));
        requestRecord.complete(storeResponse);
    } else {
        final CosmosException cause;
        final long lsn = response.getHeader(RntbdResponseHeader.LSN);
        final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId);
        // Prefer the error payload from the service; otherwise synthesize one from the status line.
        final CosmosError error = response.hasPayload()
            ? new CosmosError(RntbdObjectMapper.readTree(response))
            : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name());
        final Map<String, String> responseHeaders = response.getHeaders().asMap(
            this.rntbdContext().orElseThrow(IllegalStateException::new), activityId
        );
        final String resourceAddress = requestRecord.args().physicalAddress() != null ?
            requestRecord.args().physicalAddress().toString() : null;
        switch (status.code()) {
            case StatusCodes.BADREQUEST:
                cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.CONFLICT:
                cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.FORBIDDEN:
                cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.GONE:
                // 410 is refined by the sub-status header into more specific exception types.
                final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus));
                switch (subStatusCode) {
                    case SubStatusCodes.COMPLETING_SPLIT:
                        cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.COMPLETING_PARTITION_MIGRATION:
                        cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.NAME_CACHE_IS_STALE:
                        cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.PARTITION_KEY_RANGE_GONE:
                        cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    default:
                        // Mark the exception as an actual 410 from the service, not a client-side mapping.
                        GoneException goneExceptionFromService =
                            new GoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        goneExceptionFromService.setIsBasedOn410ResponseFromService();
                        cause = goneExceptionFromService;
                        break;
                }
                break;
            case StatusCodes.INTERNAL_SERVER_ERROR:
                cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.LOCKED:
                cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.METHOD_NOT_ALLOWED:
                cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.NOTFOUND:
                cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.PRECONDITION_FAILED:
                cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.REQUEST_ENTITY_TOO_LARGE:
                cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.REQUEST_TIMEOUT:
                // A 408 is surfaced as a GoneException wrapping the RequestTimeoutException.
                Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders);
                cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner);
                break;
            case StatusCodes.RETRY_WITH:
                cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.SERVICE_UNAVAILABLE:
                cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.TOO_MANY_REQUESTS:
                cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.UNAUTHORIZED:
                cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            default:
                cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders);
                break;
        }
        BridgeInternal.setResourceAddress(cause, resourceAddress);
        requestRecord.completeExceptionally(cause);
    }
}
/**
 * Removes the RNTBD context negotiator from the pipeline and flushes any writes that were
 * buffered while context negotiation was in progress.
 *
 * @param context the channel handler context whose pipeline holds the negotiator.
 */
private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) {
    final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class);
    negotiator.removeInboundHandler();
    negotiator.removeOutboundHandler();
    if (this.pendingWrites.isEmpty()) {
        return;
    }
    this.pendingWrites.writeAndRemoveAll(context);
    context.flush();
}
/**
 * Forwards an issue report to {@link RntbdReporter} using this class's logger.
 */
private static void reportIssue(final Object subject, final String format, final Object... args) {
    RntbdReporter.reportIssue(logger, subject, format, args);
}
/**
 * Forwards an issue report to {@link RntbdReporter} unless {@code predicate} holds.
 */
private static void reportIssueUnless(
    final boolean predicate, final Object subject, final String format, final Object... args
) {
    RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args);
}
/**
 * Logs a channel operation at TRACE level.
 *
 * @param context the channel handler context the operation applies to.
 * @param operationName the name of the operation being traced.
 * @param args optional operation arguments.
 */
// NOTE(review): 'args' (an Object[]) binds to the final '{}' placeholder as a single argument
// and is rendered via SLF4J's array formatting — confirm this is the intended output shape.
private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) {
    logger.trace("{}\n{}\n{}", operationName, context, args);
}
/**
 * Signals that a channel failed its health check and is being closed.
 */
final static class UnhealthyChannelException extends ChannelException {
    UnhealthyChannelException(String reason) {
        super("health check failed, reason: " + reason);
    }
    // Returns 'this' without capturing a stack trace; the exception only marks channel state,
    // so the capture cost is deliberately avoided.
    @Override
    public Throwable fillInStackTrace() {
        return this;
    }
}
} |
Fixed | public void userEventTriggered(final ChannelHandlerContext context, final Object event) {
this.traceOperation(context, "userEventTriggered", event);
try {
if (event instanceof IdleStateEvent) {
if (this.healthChecker instanceof RntbdClientChannelHealthChecker) {
((RntbdClientChannelHealthChecker) this.healthChecker)
.isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> {
final Throwable cause;
if (future.isSuccess()) {
if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) {
return;
}
cause = new UnhealthyChannelException(future.get());
} else {
cause = future.cause();
}
this.exceptionCaught(context, cause);
});
} else {
this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> {
final Throwable cause;
if (future.isSuccess()) {
if (future.get()) {
return;
}
cause = new UnhealthyChannelException(
MessageFormat.format(
"Custom ChannelHealthChecker {0} failed.",
this.healthChecker.getClass().getSimpleName()));
} else {
cause = future.cause();
}
this.exceptionCaught(context, cause);
});
}
return;
}
if (event instanceof RntbdContext) {
this.contextFuture.complete((RntbdContext) event);
this.removeContextNegotiatorAndFlushPendingWrites(context);
return;
}
if (event instanceof RntbdContextException) {
this.contextFuture.completeExceptionally((RntbdContextException) event);
context.pipeline().flush().close();
return;
}
if (event instanceof SslHandshakeCompletionEvent) {
SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event;
if (!sslHandshakeCompletionEvent.isSuccess()) {
if (logger.isDebugEnabled()) {
logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause());
}
this.exceptionCaught(context, sslHandshakeCompletionEvent.cause());
return;
} else {
logger.info("adding idleStateHandler");
context.pipeline().addFirst(
new IdleStateHandler(
this.idleConnectionTimerResolutionInNanos,
this.idleConnectionTimerResolutionInNanos,
0,
TimeUnit.NANOSECONDS));
}
}
context.fireUserEventTriggered(event);
} catch (Throwable error) {
reportIssue(context, "{}: ", event, error);
this.exceptionCaught(context, error);
}
} | new IdleStateHandler( | public void userEventTriggered(final ChannelHandlerContext context, final Object event) {
this.traceOperation(context, "userEventTriggered", event);
try {
if (event instanceof IdleStateEvent) {
if (this.healthChecker instanceof RntbdClientChannelHealthChecker) {
((RntbdClientChannelHealthChecker) this.healthChecker)
.isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> {
final Throwable cause;
if (future.isSuccess()) {
if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) {
return;
}
cause = new UnhealthyChannelException(future.get());
} else {
cause = future.cause();
}
this.exceptionCaught(context, cause);
});
} else {
this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> {
final Throwable cause;
if (future.isSuccess()) {
if (future.get()) {
return;
}
cause = new UnhealthyChannelException(
MessageFormat.format(
"Custom ChannelHealthChecker {0} failed.",
this.healthChecker.getClass().getSimpleName()));
} else {
cause = future.cause();
}
this.exceptionCaught(context, cause);
});
}
return;
}
if (event instanceof RntbdContext) {
this.contextFuture.complete((RntbdContext) event);
this.removeContextNegotiatorAndFlushPendingWrites(context);
return;
}
if (event instanceof RntbdContextException) {
this.contextFuture.completeExceptionally((RntbdContextException) event);
this.exceptionCaught(context, (RntbdContextException)event);
return;
}
if (event instanceof SslHandshakeCompletionEvent) {
SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event;
if (sslHandshakeCompletionEvent.isSuccess()) {
if (logger.isDebugEnabled()) {
logger.debug("SslHandshake completed, adding idleStateHandler");
}
context.pipeline().addAfter(
SslHandler.class.toString(),
IdleStateHandler.class.toString(),
new IdleStateHandler(
this.idleConnectionTimerResolutionInNanos,
this.idleConnectionTimerResolutionInNanos,
0,
TimeUnit.NANOSECONDS));
} else {
if (logger.isDebugEnabled()) {
logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause());
}
this.exceptionCaught(context, sslHandshakeCompletionEvent.cause());
return;
}
}
context.fireUserEventTriggered(event);
} catch (Throwable error) {
reportIssue(context, "{}: ", event, error);
this.exceptionCaught(context, error);
}
} | class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler {
private static final ClosedChannelException ON_CHANNEL_UNREGISTERED =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered");
private static final ClosedChannelException ON_CLOSE =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close");
private static final ClosedChannelException ON_DEREGISTER =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister");
private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory(
"request-expirator",
true,
Thread.NORM_PRIORITY));
private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class);
private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>();
private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>();
private final ChannelHealthChecker healthChecker;
private final int pendingRequestLimit;
private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests;
private final Timestamps timestamps = new Timestamps();
private final RntbdConnectionStateListener rntbdConnectionStateListener;
private final long idleConnectionTimerResolutionInNanos;
private boolean closingExceptionally = false;
private CoalescingBufferQueue pendingWrites;
public RntbdRequestManager(
final ChannelHealthChecker healthChecker,
final int pendingRequestLimit,
final RntbdConnectionStateListener connectionStateListener,
final long idleConnectionTimerResolutionInNanos) {
checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit);
checkNotNull(healthChecker, "healthChecker");
this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit);
this.pendingRequestLimit = pendingRequestLimit;
this.healthChecker = healthChecker;
this.rntbdConnectionStateListener = connectionStateListener;
this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos;
}
/**
* Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void handlerAdded(final ChannelHandlerContext context) {
this.traceOperation(context, "handlerAdded");
}
/**
* Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events
* anymore.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void handlerRemoved(final ChannelHandlerContext context) {
this.traceOperation(context, "handlerRemoved");
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} is now active
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelActive(final ChannelHandlerContext context) {
this.traceOperation(context, "channelActive");
context.fireChannelActive();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime
* <p>
* This method will only be called after the channel is closed.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelInactive(final ChannelHandlerContext context) {
this.traceOperation(context, "channelInactive");
context.fireChannelInactive();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs.
* @param message The message read.
*/
@Override
public void channelRead(final ChannelHandlerContext context, final Object message) {
this.traceOperation(context, "channelRead");
try {
if (message.getClass() == RntbdResponse.class) {
try {
this.messageReceived(context, (RntbdResponse) message);
} catch (CorruptedFrameException error) {
this.exceptionCaught(context, error);
} catch (Throwable throwable) {
reportIssue(context, "{} ", message, throwable);
this.exceptionCaught(context, throwable);
}
} else {
final IllegalStateException error = new IllegalStateException(
lenientFormat("expected message of %s, not %s: %s",
RntbdResponse.class,
message.getClass(),
message));
reportIssue(context, "", error);
this.exceptionCaught(context, error);
}
} finally {
if (message instanceof ReferenceCounted) {
boolean released = ((ReferenceCounted) message).release();
reportIssueUnless(released, context, "failed to release message: {}", message);
}
}
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read.
* <p>
* If {@link ChannelOption
* {@link Channel} will be made until {@link ChannelHandlerContext
* for outbound messages to be written.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelReadComplete(final ChannelHandlerContext context) {
this.traceOperation(context, "channelReadComplete");
this.timestamps.channelReadCompleted();
context.fireChannelReadComplete();
}
/**
* Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest}
* <p>
* This method then calls {@link ChannelHandlerContext
* {@link ChannelInboundHandler} in the {@link ChannelPipeline}.
* <p>
* Sub-classes may override this method to change behavior.
*
* @param context the {@link ChannelHandlerContext} for which the bind operation is made
*/
@Override
public void channelRegistered(final ChannelHandlerContext context) {
this.traceOperation(context, "channelRegistered");
reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites);
this.pendingWrites = new CoalescingBufferQueue(context.channel());
context.fireChannelRegistered();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop}
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelUnregistered(final ChannelHandlerContext context) {
this.traceOperation(context, "channelUnregistered");
if (!this.closingExceptionally) {
this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED);
} else {
logger.debug("{} channelUnregistered exceptionally", context);
}
context.fireChannelUnregistered();
}
/**
* Gets called once the writable state of a {@link Channel} changed. You can check the state with
* {@link Channel
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelWritabilityChanged(final ChannelHandlerContext context) {
this.traceOperation(context, "channelWritabilityChanged");
context.fireChannelWritabilityChanged();
}
/**
* Processes {@link ChannelHandlerContext
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
* @param cause Exception caught
*/
@Override
@SuppressWarnings("deprecation")
// Completes all pending requests exceptionally (at most once — guarded by closingExceptionally)
// and then flushes and closes the channel.
public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) {
    this.traceOperation(context, "exceptionCaught", cause);
    if (!this.closingExceptionally) {
        this.completeAllPendingRequestsExceptionally(context, cause);
        if (logger.isDebugEnabled()) {
            logger.debug("{} closing due to:", context, cause);
        }
        context.flush().close();
    }
}
/**
* Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline
* <p>
* All but inbound request management events are ignored.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
* @param event An object representing a user event
*/
@Override
/**
* Called once a bind operation is made.
*
* @param context the {@link ChannelHandlerContext} for which the bind operation is made
* @param localAddress the {@link SocketAddress} to which it should bound
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) {
this.traceOperation(context, "bind", localAddress);
context.bind(localAddress, promise);
}
/**
* Called once a close operation is made.
*
* @param context the {@link ChannelHandlerContext} for which the close operation is made
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
this.traceOperation(context, "close");
if (!this.closingExceptionally) {
this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
} else {
logger.debug("{} closed exceptionally", context);
}
final SslHandler sslHandler = context.pipeline().get(SslHandler.class);
if (sslHandler != null) {
try {
sslHandler.closeOutbound();
} catch (Exception exception) {
if (exception instanceof SSLException) {
logger.debug(
"SslException when attempting to close the outbound SSL connection: ",
exception);
} else {
logger.warn(
"Exception when attempting to close the outbound SSL connection: ",
exception);
throw exception;
}
}
}
context.close(promise);
}
/**
* Called once a connect operation is made.
*
* @param context the {@link ChannelHandlerContext} for which the connect operation is made
* @param remoteAddress the {@link SocketAddress} to which it should connect
* @param localAddress the {@link SocketAddress} which is used as source on connect
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void connect(
final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress,
final ChannelPromise promise
) {
this.traceOperation(context, "connect", remoteAddress, localAddress);
context.connect(remoteAddress, localAddress, promise);
}
/**
* Called once a deregister operation is made from the current registered {@link EventLoop}.
*
* @param context the {@link ChannelHandlerContext} for which the deregister operation is made
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) {
this.traceOperation(context, "deregister");
if (!this.closingExceptionally) {
this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER);
} else {
logger.debug("{} deregistered exceptionally", context);
}
context.deregister(promise);
}
/**
* Called once a disconnect operation is made.
*
* @param context the {@link ChannelHandlerContext} for which the disconnect operation is made
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) {
this.traceOperation(context, "disconnect");
context.disconnect(promise);
}
/**
* Called once a flush operation is made
* <p>
* The flush operation will try to flush out all previous written messages that are pending.
*
* @param context the {@link ChannelHandlerContext} for which the flush operation is made
*/
@Override
public void flush(final ChannelHandlerContext context) {
this.traceOperation(context, "flush");
context.flush();
}
/**
* Intercepts {@link ChannelHandlerContext
*
* @param context the {@link ChannelHandlerContext} for which the read operation is made
*/
@Override
public void read(final ChannelHandlerContext context) {
this.traceOperation(context, "read");
context.read();
}
/**
* Called once a write operation is made
* <p>
* The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed
* to the actual {@link Channel}. This will occur when {@link Channel
*
* @param context the {@link ChannelHandlerContext} for which the write operation is made
* @param message the message to write
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) {
    this.traceOperation(context, "write", message);
    // Application request: register in the pending map, then write. The SENT stage and the
    // write-completed timestamp are recorded when the outbound write finishes.
    if (message instanceof RntbdRequestRecord) {
        final RntbdRequestRecord record = (RntbdRequestRecord) message;
        this.timestamps.channelWriteAttempted();
        record.setSendingRequestHasStarted();
        context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> {
            record.stage(RntbdRequestRecord.Stage.SENT);
            if (completed.isSuccess()) {
                this.timestamps.channelWriteCompleted();
            }
        });
        return;
    }
    // Health-check probe: identity comparison is intentional — the probe is a shared constant.
    if (message == RntbdHealthCheckRequest.MESSAGE) {
        context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> {
            if (completed.isSuccess()) {
                this.timestamps.channelPingCompleted();
            }
        });
        return;
    }
    // Any other message type is a programming error: report it and fail the channel.
    final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s",
        message.getClass(),
        message));
    reportIssue(context, "", error);
    this.exceptionCaught(context, error);
}
/**
 * Returns the number of requests written to this channel that have not yet completed.
 */
int pendingRequestCount() {
    return this.pendingRequests.size();
}
/**
 * Returns the {@link RntbdContext} received from the server, or {@link Optional#empty()} if
 * RNTBD context negotiation has not completed yet.
 * <p>
 * {@code contextFuture.getNow(null)} yields {@code null} until the context arrives, so the
 * result must be wrapped with {@link Optional#ofNullable}; {@link Optional#of} would throw
 * {@link NullPointerException} before negotiation completes, defeating callers that use
 * {@code rntbdContext().orElseThrow(IllegalStateException::new)}.
 *
 * @return the negotiated {@link RntbdContext}, if available.
 */
Optional<RntbdContext> rntbdContext() {
    return Optional.ofNullable(this.contextFuture.getNow(null));
}
/**
 * Returns the future that completes once the {@link RntbdContextRequest} has been written to
 * the channel.
 */
CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() {
    return this.contextRequestFuture;
}
/**
 * Tells whether the RNTBD context request has already been issued on this channel.
 *
 * @return {@code true} once the context request future holds a value.
 */
boolean hasRequestedRntbdContext() {
    final RntbdContextRequest contextRequest = this.contextRequestFuture.getNow(null);
    return contextRequest != null;
}
/**
 * Tells whether the server's {@link RntbdContext} has been received on this channel.
 *
 * @return {@code true} once the context future holds a value.
 */
boolean hasRntbdContext() {
    final RntbdContext rntbdContext = this.contextFuture.getNow(null);
    return rntbdContext != null;
}
/**
 * Computes the admission state of this channel for a caller that wants to issue
 * {@code demand} more requests.
 *
 * @param demand the number of requests the caller intends to send.
 * @return an {@link RntbdChannelState} describing whether this channel can accept more work.
 */
RntbdChannelState getChannelState(final int demand) {
    reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued");
    final boolean contextNegotiated = this.hasRntbdContext();
    // Until the RNTBD context is negotiated, concurrency is additionally capped by demand.
    final int limit = contextNegotiated
        ? this.pendingRequestLimit
        : Math.min(this.pendingRequestLimit, demand);
    final int pendingCount = this.pendingRequests.size();
    if (pendingCount < limit) {
        return RntbdChannelState.ok(pendingCount);
    }
    return contextNegotiated
        ? RntbdChannelState.pendingLimit(pendingCount)
        : RntbdChannelState.contextNegotiationPending(pendingCount);
}
/**
 * Buffers an encoded request until it can be flushed (e.g. once RNTBD context negotiation
 * completes).
 *
 * @param out the encoded request bytes.
 * @param promise the promise to complete when the buffered bytes are eventually written.
 */
void pendWrite(final ByteBuf out, final ChannelPromise promise) {
    this.pendingWrites.add(out, promise);
}
/**
 * Returns a point-in-time copy of this request manager's channel activity timestamps.
 */
Timestamps snapshotTimestamps() {
    return new Timestamps(this.timestamps);
}
/**
 * Registers {@code record} in the pending-request map keyed by its transport request id, arms
 * its expiration timeout, and wires completion to de-register the record and cancel the timeout.
 *
 * @param context the channel handler context, used for issue reporting.
 * @param record the request record to track.
 * @return {@code record}, now registered, so the caller can pass it straight to the write.
 */
private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) {
    return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> {
        // A transport request id must be unique per channel. Supply all three format arguments;
        // the previous call passed only 'record', leaving two '{}' placeholders unfilled in the
        // reported message.
        reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", id, current, record);
        record.pendingRequestQueueSize(pendingRequests.size());
        // Expiration runs on a dedicated executor so the timer thread is never blocked.
        final Timeout pendingRequestTimeout = record.newTimeout(timeout -> {
            requestExpirationExecutor.execute(record::expire);
        });
        // On completion (success or failure) remove the record and disarm its timeout.
        record.whenComplete((response, error) -> {
            this.pendingRequests.remove(id);
            pendingRequestTimeout.cancel();
        });
        return record;
    });
}
private void completeAllPendingRequestsExceptionally(
final ChannelHandlerContext context, final Throwable throwable
) {
reportIssueUnless(!this.closingExceptionally, context, "", throwable);
this.closingExceptionally = true;
if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) {
this.pendingWrites.releaseAndFailAll(context, throwable);
}
if (this.rntbdConnectionStateListener != null) {
this.rntbdConnectionStateListener.onException(throwable);
}
if (this.pendingRequests.isEmpty()) {
return;
}
if (!this.contextRequestFuture.isDone()) {
this.contextRequestFuture.completeExceptionally(throwable);
}
if (!this.contextFuture.isDone()) {
this.contextFuture.completeExceptionally(throwable);
}
final int count = this.pendingRequests.size();
Exception contextRequestException = null;
String phrase = null;
if (this.contextRequestFuture.isCompletedExceptionally()) {
try {
this.contextRequestFuture.get();
} catch (final CancellationException error) {
phrase = "RNTBD context request write cancelled";
contextRequestException = error;
} catch (final Exception error) {
phrase = "RNTBD context request write failed";
contextRequestException = error;
} catch (final Throwable error) {
phrase = "RNTBD context request write failed";
contextRequestException = new ChannelException(error);
}
} else if (this.contextFuture.isCompletedExceptionally()) {
try {
this.contextFuture.get();
} catch (final CancellationException error) {
phrase = "RNTBD context request read cancelled";
contextRequestException = error;
} catch (final Exception error) {
phrase = "RNTBD context request read failed";
contextRequestException = error;
} catch (final Throwable error) {
phrase = "RNTBD context request read failed";
contextRequestException = new ChannelException(error);
}
} else {
phrase = "closed exceptionally";
}
final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count);
final Exception cause;
if (throwable instanceof ClosedChannelException) {
cause = contextRequestException == null
? (ClosedChannelException) throwable
: contextRequestException;
} else {
cause = throwable instanceof Exception
? (Exception) throwable
: new ChannelException(throwable);
}
for (RntbdRequestRecord record : this.pendingRequests.values()) {
final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders();
final String requestUri = record.args().physicalAddress().toString();
final GoneException error = new GoneException(message, cause, null, requestUri);
BridgeInternal.setRequestHeaders(error, requestHeaders);
record.completeExceptionally(error);
}
}
/**
* This method is called for each incoming message of type {@link RntbdResponse} to complete a request.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs.
* @param response the {@link RntbdResponse message} received.
*/
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) {
    // Correlate the response with its pending request via the transport request id.
    final Long transportRequestId = response.getTransportRequestId();
    if (transportRequestId == null) {
        reportIssue(context, "response ignored because its transportRequestId is missing: {}", response);
        return;
    }
    final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId);
    if (requestRecord == null) {
        // The request may already have expired or completed exceptionally; drop the late response.
        logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response);
        return;
    }
    // Record decode timing and response size for request diagnostics.
    requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime());
    requestRecord.stage(
        RntbdRequestRecord.Stage.RECEIVED,
        response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now());
    requestRecord.responseLength(response.getMessageLength());
    final HttpResponseStatus status = response.getStatus();
    final UUID activityId = response.getActivityId();
    final int statusCode = status.code();
    // A 2xx status or 304 (Not Modified) completes the request successfully;
    // every other status is mapped to a CosmosException subtype below.
    if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) ||
        statusCode == HttpResponseStatus.NOT_MODIFIED.code()) {
        final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null));
        requestRecord.complete(storeResponse);
    } else {
        final CosmosException cause;
        final long lsn = response.getHeader(RntbdResponseHeader.LSN);
        final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId);
        // Prefer the service-supplied error payload; otherwise synthesize one from the HTTP status.
        final CosmosError error = response.hasPayload()
            ? new CosmosError(RntbdObjectMapper.readTree(response))
            : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name());
        final Map<String, String> responseHeaders = response.getHeaders().asMap(
            this.rntbdContext().orElseThrow(IllegalStateException::new), activityId
        );
        final String resourceAddress = requestRecord.args().physicalAddress() != null ?
            requestRecord.args().physicalAddress().toString() : null;
        switch (status.code()) {
            case StatusCodes.BADREQUEST:
                cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.CONFLICT:
                cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.FORBIDDEN:
                cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.GONE:
                // 410 is refined by its sub-status code into a specific exception subtype.
                final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus));
                switch (subStatusCode) {
                    case SubStatusCodes.COMPLETING_SPLIT:
                        cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.COMPLETING_PARTITION_MIGRATION:
                        cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.NAME_CACHE_IS_STALE:
                        cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.PARTITION_KEY_RANGE_GONE:
                        cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    default:
                        // A plain 410 from the service is flagged so it can be distinguished
                        // from locally generated GoneExceptions.
                        GoneException goneExceptionFromService =
                            new GoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        goneExceptionFromService.setIsBasedOn410ResponseFromService();
                        cause = goneExceptionFromService;
                        break;
                }
                break;
            case StatusCodes.INTERNAL_SERVER_ERROR:
                cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.LOCKED:
                cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.METHOD_NOT_ALLOWED:
                cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.NOTFOUND:
                cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.PRECONDITION_FAILED:
                cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.REQUEST_ENTITY_TOO_LARGE:
                cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.REQUEST_TIMEOUT:
                // A server-side 408 is surfaced as a GoneException wrapping the timeout.
                Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders);
                cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner);
                break;
            case StatusCodes.RETRY_WITH:
                cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.SERVICE_UNAVAILABLE:
                cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.TOO_MANY_REQUESTS:
                cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.UNAUTHORIZED:
                cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            default:
                cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders);
                break;
        }
        BridgeInternal.setResourceAddress(cause, resourceAddress);
        requestRecord.completeExceptionally(cause);
    }
}
// Removes the RNTBD context negotiator from the pipeline once negotiation is complete,
// then writes out and flushes any requests that were buffered while waiting for the context.
private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) {
    final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class);
    negotiator.removeInboundHandler();
    negotiator.removeOutboundHandler();
    if (!this.pendingWrites.isEmpty()) {
        this.pendingWrites.writeAndRemoveAll(context);
        context.flush();
    }
}
// Delegates to RntbdReporter so issue reports share uniform formatting and this class's logger.
private static void reportIssue(final Object subject, final String format, final Object... args) {
    RntbdReporter.reportIssue(logger, subject, format, args);
}
// Reports an issue only when {@code predicate} is false; used for invariant checks that
// should be logged rather than thrown on the I/O path.
private static void reportIssueUnless(
    final boolean predicate, final Object subject, final String format, final Object... args
) {
    RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args);
}
// Trace-level breadcrumb emitted at the start of each channel operation;
// effectively a no-op unless TRACE logging is enabled.
private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) {
    logger.trace("{}\n{}\n{}", operationName, context, args);
}
// Signals that a channel failed its health check and should be closed.
final static class UnhealthyChannelException extends ChannelException {
    UnhealthyChannelException(String reason) {
        super("health check failed, reason: " + reason);
    }
    // This exception is a signalling type; suppress stack-trace capture, which is
    // expensive and carries no useful information here.
    @Override
    public Throwable fillInStackTrace() {
        return this;
    }
}
} | class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler {
private static final ClosedChannelException ON_CHANNEL_UNREGISTERED =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered");
private static final ClosedChannelException ON_CLOSE =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close");
private static final ClosedChannelException ON_DEREGISTER =
ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister");
private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory(
"request-expirator",
true,
Thread.NORM_PRIORITY));
private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class);
private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>();
private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>();
private final ChannelHealthChecker healthChecker;
private final int pendingRequestLimit;
private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests;
private final Timestamps timestamps = new Timestamps();
private final RntbdConnectionStateListener rntbdConnectionStateListener;
private final long idleConnectionTimerResolutionInNanos;
private boolean closingExceptionally = false;
private CoalescingBufferQueue pendingWrites;
/**
 * Creates a request manager for a single RNTBD channel.
 *
 * @param healthChecker checker used to validate channel health; must not be null
 * @param pendingRequestLimit maximum number of in-flight requests on this channel; must be positive
 * @param connectionStateListener listener notified of connection failures; may be null
 * @param idleConnectionTimerResolutionInNanos resolution of the idle-connection timer, in nanoseconds
 */
public RntbdRequestManager(
    final ChannelHealthChecker healthChecker,
    final int pendingRequestLimit,
    final RntbdConnectionStateListener connectionStateListener,
    final long idleConnectionTimerResolutionInNanos) {
    checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit);
    checkNotNull(healthChecker, "healthChecker");
    // Pre-size the pending-request map to its limit to reduce rehashing under load.
    this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit);
    this.pendingRequestLimit = pendingRequestLimit;
    this.healthChecker = healthChecker;
    this.rntbdConnectionStateListener = connectionStateListener;
    this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos;
}
/**
* Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void handlerAdded(final ChannelHandlerContext context) {
this.traceOperation(context, "handlerAdded");
}
/**
* Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events
* anymore.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void handlerRemoved(final ChannelHandlerContext context) {
this.traceOperation(context, "handlerRemoved");
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} is now active
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelActive(final ChannelHandlerContext context) {
this.traceOperation(context, "channelActive");
context.fireChannelActive();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime
* <p>
* This method will only be called after the channel is closed.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelInactive(final ChannelHandlerContext context) {
this.traceOperation(context, "channelInactive");
context.fireChannelInactive();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs.
* @param message The message read.
*/
@Override
public void channelRead(final ChannelHandlerContext context, final Object message) {
    this.traceOperation(context, "channelRead");
    try {
        if (message.getClass() == RntbdResponse.class) {
            try {
                this.messageReceived(context, (RntbdResponse) message);
            } catch (CorruptedFrameException error) {
                // A malformed frame is routed through the standard error path, which closes the channel.
                this.exceptionCaught(context, error);
            } catch (Throwable throwable) {
                reportIssue(context, "{} ", message, throwable);
                this.exceptionCaught(context, throwable);
            }
        } else {
            // Only RntbdResponse messages are expected at this point in the pipeline.
            final IllegalStateException error = new IllegalStateException(
                lenientFormat("expected message of %s, not %s: %s",
                    RntbdResponse.class,
                    message.getClass(),
                    message));
            reportIssue(context, "", error);
            this.exceptionCaught(context, error);
        }
    } finally {
        // Release the reference-counted message exactly once, on success or failure.
        if (message instanceof ReferenceCounted) {
            boolean released = ((ReferenceCounted) message).release();
            reportIssueUnless(released, context, "failed to release message: {}", message);
        }
    }
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read.
* <p>
* If {@link ChannelOption
* {@link Channel} will be made until {@link ChannelHandlerContext
* for outbound messages to be written.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelReadComplete(final ChannelHandlerContext context) {
this.traceOperation(context, "channelReadComplete");
this.timestamps.channelReadCompleted();
context.fireChannelReadComplete();
}
/**
 * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until the RNTBD
 * context has been negotiated.
 * <p>
 * This method then calls {@link ChannelHandlerContext#fireChannelRegistered()} to forward the
 * event to the next {@link ChannelInboundHandler} in the {@link ChannelPipeline}.
 * <p>
 * Sub-classes may override this method to change behavior.
 *
 * @param context the {@link ChannelHandlerContext} whose channel was registered
 */
@Override
public void channelRegistered(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelRegistered");
    // A second registration without an intervening unregistration would leak the previous queue.
    reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites);
    this.pendingWrites = new CoalescingBufferQueue(context.channel());
    context.fireChannelRegistered();
}
/**
* The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop}
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelUnregistered(final ChannelHandlerContext context) {
    this.traceOperation(context, "channelUnregistered");
    if (!this.closingExceptionally) {
        // Normal unregistration: fail anything still pending with a ClosedChannelException.
        this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED);
    } else {
        // Pending requests were already failed by the exceptional close path.
        logger.debug("{} channelUnregistered exceptionally", context);
    }
    context.fireChannelUnregistered();
}
/**
* Gets called once the writable state of a {@link Channel} changed. You can check the state with
* {@link Channel
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
*/
@Override
public void channelWritabilityChanged(final ChannelHandlerContext context) {
this.traceOperation(context, "channelWritabilityChanged");
context.fireChannelWritabilityChanged();
}
/**
* Processes {@link ChannelHandlerContext
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
* @param cause Exception caught
*/
@Override
@SuppressWarnings("deprecation")
public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) {
    this.traceOperation(context, "exceptionCaught", cause);
    if (!this.closingExceptionally) {
        // First failure wins: fail all pending work, then flush and close the channel.
        this.completeAllPendingRequestsExceptionally(context, cause);
        if (logger.isDebugEnabled()) {
            logger.debug("{} closing due to:", context, cause);
        }
        context.flush().close();
    }
}
/**
* Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline
* <p>
* All but inbound request management events are ignored.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
* @param event An object representing a user event
*/
@Override
/**
* Called once a bind operation is made.
*
* @param context the {@link ChannelHandlerContext} for which the bind operation is made
* @param localAddress the {@link SocketAddress} to which it should bound
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) {
this.traceOperation(context, "bind", localAddress);
context.bind(localAddress, promise);
}
/**
* Called once a close operation is made.
*
* @param context the {@link ChannelHandlerContext} for which the close operation is made
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
    this.traceOperation(context, "close");
    if (!this.closingExceptionally) {
        this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
    } else {
        logger.debug("{} closed exceptionally", context);
    }
    // Close the outbound side of the TLS session before closing the channel, if TLS is in use.
    final SslHandler sslHandler = context.pipeline().get(SslHandler.class);
    if (sslHandler != null) {
        try {
            sslHandler.closeOutbound();
        } catch (Exception exception) {
            if (exception instanceof SSLException) {
                // SSLExceptions during outbound close are logged at debug level and suppressed.
                logger.debug(
                    "SslException when attempting to close the outbound SSL connection: ",
                    exception);
            } else {
                // Any other failure is logged and rethrown.
                logger.warn(
                    "Exception when attempting to close the outbound SSL connection: ",
                    exception);
                throw exception;
            }
        }
    }
    context.close(promise);
}
/**
* Called once a connect operation is made.
*
* @param context the {@link ChannelHandlerContext} for which the connect operation is made
* @param remoteAddress the {@link SocketAddress} to which it should connect
* @param localAddress the {@link SocketAddress} which is used as source on connect
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void connect(
final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress,
final ChannelPromise promise
) {
this.traceOperation(context, "connect", remoteAddress, localAddress);
context.connect(remoteAddress, localAddress, promise);
}
/**
* Called once a deregister operation is made from the current registered {@link EventLoop}.
*
* @param context the {@link ChannelHandlerContext} for which the deregister operation is made
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) {
this.traceOperation(context, "deregister");
if (!this.closingExceptionally) {
this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER);
} else {
logger.debug("{} deregistered exceptionally", context);
}
context.deregister(promise);
}
/**
* Called once a disconnect operation is made.
*
* @param context the {@link ChannelHandlerContext} for which the disconnect operation is made
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) {
this.traceOperation(context, "disconnect");
context.disconnect(promise);
}
/**
* Called once a flush operation is made
* <p>
* The flush operation will try to flush out all previous written messages that are pending.
*
* @param context the {@link ChannelHandlerContext} for which the flush operation is made
*/
@Override
public void flush(final ChannelHandlerContext context) {
this.traceOperation(context, "flush");
context.flush();
}
/**
* Intercepts {@link ChannelHandlerContext
*
* @param context the {@link ChannelHandlerContext} for which the read operation is made
*/
@Override
public void read(final ChannelHandlerContext context) {
this.traceOperation(context, "read");
context.read();
}
/**
* Called once a write operation is made
* <p>
* The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed
* to the actual {@link Channel}. This will occur when {@link Channel
*
* @param context the {@link ChannelHandlerContext} for which the write operation is made
* @param message the message to write
* @param promise the {@link ChannelPromise} to notify once the operation completes
*/
@Override
public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) {
    this.traceOperation(context, "write", message);
    if (message instanceof RntbdRequestRecord) {
        final RntbdRequestRecord record = (RntbdRequestRecord) message;
        this.timestamps.channelWriteAttempted();
        record.setSendingRequestHasStarted();
        // Register the record as pending before writing so the response can be correlated.
        context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> {
            record.stage(RntbdRequestRecord.Stage.SENT);
            if (completed.isSuccess()) {
                this.timestamps.channelWriteCompleted();
            }
        });
        return;
    }
    if (message == RntbdHealthCheckRequest.MESSAGE) {
        // Health-check pings bypass the pending-request bookkeeping.
        context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> {
            if (completed.isSuccess()) {
                this.timestamps.channelPingCompleted();
            }
        });
        return;
    }
    // Any other message type indicates a pipeline programming error.
    final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s",
        message.getClass(),
        message));
    reportIssue(context, "", error);
    this.exceptionCaught(context, error);
}
// Number of requests currently awaiting a response on this channel.
int pendingRequestCount() {
    return this.pendingRequests.size();
}
/**
 * Returns the negotiated {@link RntbdContext}, or {@link Optional#empty()} when context
 * negotiation has not yet completed.
 * <p>
 * {@code contextFuture.getNow(null)} yields {@code null} until the context arrives (see
 * {@code hasRntbdContext}), and {@code Optional.of(null)} throws {@link NullPointerException}.
 * Using {@code ofNullable} returns the empty Optional that callers expect — for example
 * {@code messageReceived} calls {@code rntbdContext().orElseThrow(IllegalStateException::new)}.
 *
 * @return the RNTBD context if negotiated; otherwise {@link Optional#empty()}
 */
Optional<RntbdContext> rntbdContext() {
    return Optional.ofNullable(this.contextFuture.getNow(null));
}
// Future that completes once the RNTBD context request has been issued on this channel.
CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() {
    return this.contextRequestFuture;
}
// True once the RNTBD context request has been issued on this channel.
boolean hasRequestedRntbdContext() {
    return this.contextRequestFuture.getNow(null) != null;
}
// True once the RNTBD context response has been received, i.e. negotiation is complete.
boolean hasRntbdContext() {
    return this.contextFuture.getNow(null) != null;
}
/**
 * Classifies this channel's capacity to accept additional requests.
 *
 * @param demand the number of requests the caller would like to dispatch
 * @return OK when below the applicable limit; otherwise pending-limit reached or
 * context-negotiation pending, depending on whether the RNTBD context has arrived
 */
RntbdChannelState getChannelState(final int demand) {
    reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued");
    // Before the RNTBD context arrives, admit at most min(limit, demand) requests;
    // afterwards the full pending-request limit applies.
    final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand);
    if (this.pendingRequests.size() < limit) {
        return RntbdChannelState.ok(this.pendingRequests.size());
    }
    if (this.hasRntbdContext()) {
        return RntbdChannelState.pendingLimit(this.pendingRequests.size());
    } else {
        return RntbdChannelState.contextNegotiationPending((this.pendingRequests.size()));
    }
}
// Buffers an encoded request until RNTBD context negotiation completes; the queue is
// drained by removeContextNegotiatorAndFlushPendingWrites.
void pendWrite(final ByteBuf out, final ChannelPromise promise) {
    this.pendingWrites.add(out, promise);
}
// Returns a copy of the channel's I/O timestamps, isolating callers from concurrent updates.
Timestamps snapshotTimestamps() {
    return new Timestamps(this.timestamps);
}
/**
 * Registers {@code record} in the pending-request map, keyed by transport request id, and
 * arranges for its expiration and eventual removal.
 *
 * @param context the channel context, used for issue reporting
 * @param record the request record to track
 * @return the same {@code record}, now tracked as pending
 */
private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) {
    return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> {
        // The format string has three placeholders; the original passed only 'record',
        // leaving two of them unfilled. Supply all three arguments so a transport-request-id
        // collision report shows the id and both records.
        reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", id, current, record);
        record.pendingRequestQueueSize(pendingRequests.size());
        // Expiration is dispatched to the shared requestExpirationExecutor rather than run
        // directly on the timer thread.
        final Timeout pendingRequestTimeout = record.newTimeout(timeout -> {
            requestExpirationExecutor.execute(record::expire);
        });
        record.whenComplete((response, error) -> {
            // On completion (success or failure), deregister the record and cancel its timer.
            this.pendingRequests.remove(id);
            pendingRequestTimeout.cancel();
        });
        return record;
    });
}
private void completeAllPendingRequestsExceptionally(
    final ChannelHandlerContext context, final Throwable throwable
) {
    // This method must run at most once per channel; a second call indicates a bookkeeping bug.
    reportIssueUnless(!this.closingExceptionally, context, "", throwable);
    this.closingExceptionally = true;
    if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) {
        // Fail any writes still buffered while awaiting RNTBD context negotiation.
        this.pendingWrites.releaseAndFailAll(context, throwable);
    }
    if (this.rntbdConnectionStateListener != null) {
        this.rntbdConnectionStateListener.onException(throwable);
    }
    if (this.pendingRequests.isEmpty()) {
        return;
    }
    // Fail the negotiation futures so observers awaiting them see the channel error.
    if (!this.contextRequestFuture.isDone()) {
        this.contextRequestFuture.completeExceptionally(throwable);
    }
    if (!this.contextFuture.isDone()) {
        this.contextFuture.completeExceptionally(throwable);
    }
    final int count = this.pendingRequests.size();
    Exception contextRequestException = null;
    String phrase = null;
    // Derive a phrase describing which negotiation phase failed, and capture its exception.
    if (this.contextRequestFuture.isCompletedExceptionally()) {
        try {
            this.contextRequestFuture.get();
        } catch (final CancellationException error) {
            phrase = "RNTBD context request write cancelled";
            contextRequestException = error;
        } catch (final Exception error) {
            phrase = "RNTBD context request write failed";
            contextRequestException = error;
        } catch (final Throwable error) {
            // Non-Exception Throwables (Errors) are wrapped so they can flow as an Exception.
            phrase = "RNTBD context request write failed";
            contextRequestException = new ChannelException(error);
        }
    } else if (this.contextFuture.isCompletedExceptionally()) {
        try {
            this.contextFuture.get();
        } catch (final CancellationException error) {
            phrase = "RNTBD context request read cancelled";
            contextRequestException = error;
        } catch (final Exception error) {
            phrase = "RNTBD context request read failed";
            contextRequestException = error;
        } catch (final Throwable error) {
            phrase = "RNTBD context request read failed";
            contextRequestException = new ChannelException(error);
        }
    } else {
        phrase = "closed exceptionally";
    }
    final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count);
    final Exception cause;
    // Prefer the negotiation failure over a bare ClosedChannelException as the root cause.
    if (throwable instanceof ClosedChannelException) {
        cause = contextRequestException == null
            ? (ClosedChannelException) throwable
            : contextRequestException;
    } else {
        cause = throwable instanceof Exception
            ? (Exception) throwable
            : new ChannelException(throwable);
    }
    // Complete every pending request with a GoneException carrying the channel context
    // and the original request headers.
    for (RntbdRequestRecord record : this.pendingRequests.values()) {
        final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders();
        final String requestUri = record.args().physicalAddress().toString();
        final GoneException error = new GoneException(message, cause, null, requestUri);
        BridgeInternal.setRequestHeaders(error, requestHeaders);
        record.completeExceptionally(error);
    }
}
/**
* This method is called for each incoming message of type {@link RntbdResponse} to complete a request.
*
* @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs.
* @param response the {@link RntbdResponse message} received.
*/
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) {
    // Correlate the response with its pending request via the transport request id.
    final Long transportRequestId = response.getTransportRequestId();
    if (transportRequestId == null) {
        reportIssue(context, "response ignored because its transportRequestId is missing: {}", response);
        return;
    }
    final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId);
    if (requestRecord == null) {
        // The request may already have expired or completed exceptionally; drop the late response.
        logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response);
        return;
    }
    // Record decode timing and response size for request diagnostics.
    requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime());
    requestRecord.stage(
        RntbdRequestRecord.Stage.RECEIVED,
        response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now());
    requestRecord.responseLength(response.getMessageLength());
    final HttpResponseStatus status = response.getStatus();
    final UUID activityId = response.getActivityId();
    final int statusCode = status.code();
    // A 2xx status or 304 (Not Modified) completes the request successfully;
    // every other status is mapped to a CosmosException subtype below.
    if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) ||
        statusCode == HttpResponseStatus.NOT_MODIFIED.code()) {
        final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null));
        requestRecord.complete(storeResponse);
    } else {
        final CosmosException cause;
        final long lsn = response.getHeader(RntbdResponseHeader.LSN);
        final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId);
        // Prefer the service-supplied error payload; otherwise synthesize one from the HTTP status.
        final CosmosError error = response.hasPayload()
            ? new CosmosError(RntbdObjectMapper.readTree(response))
            : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name());
        final Map<String, String> responseHeaders = response.getHeaders().asMap(
            this.rntbdContext().orElseThrow(IllegalStateException::new), activityId
        );
        final String resourceAddress = requestRecord.args().physicalAddress() != null ?
            requestRecord.args().physicalAddress().toString() : null;
        switch (status.code()) {
            case StatusCodes.BADREQUEST:
                cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.CONFLICT:
                cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.FORBIDDEN:
                cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.GONE:
                // 410 is refined by its sub-status code into a specific exception subtype.
                final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus));
                switch (subStatusCode) {
                    case SubStatusCodes.COMPLETING_SPLIT:
                        cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.COMPLETING_PARTITION_MIGRATION:
                        cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.NAME_CACHE_IS_STALE:
                        cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    case SubStatusCodes.PARTITION_KEY_RANGE_GONE:
                        cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        break;
                    default:
                        // A plain 410 from the service is flagged so it can be distinguished
                        // from locally generated GoneExceptions.
                        GoneException goneExceptionFromService =
                            new GoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                        goneExceptionFromService.setIsBasedOn410ResponseFromService();
                        cause = goneExceptionFromService;
                        break;
                }
                break;
            case StatusCodes.INTERNAL_SERVER_ERROR:
                cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.LOCKED:
                cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.METHOD_NOT_ALLOWED:
                cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.NOTFOUND:
                cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.PRECONDITION_FAILED:
                cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.REQUEST_ENTITY_TOO_LARGE:
                cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.REQUEST_TIMEOUT:
                // A server-side 408 is surfaced as a GoneException wrapping the timeout.
                Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders);
                cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner);
                break;
            case StatusCodes.RETRY_WITH:
                cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.SERVICE_UNAVAILABLE:
                cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.TOO_MANY_REQUESTS:
                cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            case StatusCodes.UNAUTHORIZED:
                cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders);
                break;
            default:
                cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders);
                break;
        }
        BridgeInternal.setResourceAddress(cause, resourceAddress);
        requestRecord.completeExceptionally(cause);
    }
}
// Removes the RNTBD context negotiator from the pipeline once negotiation is complete,
// then writes out and flushes any requests that were buffered while waiting for the context.
private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) {
    final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class);
    negotiator.removeInboundHandler();
    negotiator.removeOutboundHandler();
    if (!this.pendingWrites.isEmpty()) {
        this.pendingWrites.writeAndRemoveAll(context);
        context.flush();
    }
}
// Delegates to RntbdReporter so issue reports share uniform formatting and this class's logger.
private static void reportIssue(final Object subject, final String format, final Object... args) {
    RntbdReporter.reportIssue(logger, subject, format, args);
}
// Reports an issue only when {@code predicate} is false; used for invariant checks that
// should be logged rather than thrown on the I/O path.
private static void reportIssueUnless(
    final boolean predicate, final Object subject, final String format, final Object... args
) {
    RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args);
}
// Trace-level breadcrumb emitted at the start of each channel operation;
// effectively a no-op unless TRACE logging is enabled.
private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) {
    logger.trace("{}\n{}\n{}", operationName, context, args);
}
// Signals that a channel failed its health check and should be closed.
final static class UnhealthyChannelException extends ChannelException {
    UnhealthyChannelException(String reason) {
        super("health check failed, reason: " + reason);
    }
    // This exception is a signalling type; suppress stack-trace capture, which is
    // expensive and carries no useful information here.
    @Override
    public Throwable fillInStackTrace() {
        return this;
    }
}
} |
The common usage on sync API is to create `DeviceManagementClient` via `.buildClient()`. And then API call does not need to have `.block()` or `.toIterable()` to convert async response to sync response. | public static void main(String[] args) {
// Build the synchronous client via buildClient(): calls then return directly and
// no block()/toIterable() is needed to convert a reactive response.
DeviceManagementClient client = new DeviceManagementClientBuilder()
    .endpoint(Configuration.getGlobalConfiguration().get("AZURE_ACCOUNT_ENDPOINT"))
    .instanceId(Configuration.getGlobalConfiguration().get("AZURE_INSTANCE_ID"))
    .credential(new DefaultAzureCredentialBuilder().build())
    .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
    .buildClient();
try {
    Response<BinaryData> response = client.getDeviceWithResponse(
        Configuration.getGlobalConfiguration().get("DEVICEUPDATE_DEVICE"),
        null);
    System.out.println(response.getValue());
} catch (HttpResponseException e) {
    // 404 from the service: the requested device/update does not exist.
    if (e.getResponse().getStatusCode() == 404) {
        System.out.println("update does not exist");
    }
}
} | null).block(); | public static void main(String[] args) {
// Synchronous client: buildClient() returns a blocking client, so no block() is needed.
DeviceManagementClient client = new DeviceManagementClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_ACCOUNT_ENDPOINT"))
.instanceId(Configuration.getGlobalConfiguration().get("AZURE_INSTANCE_ID"))
.credential(new DefaultAzureCredentialBuilder().build())
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.buildClient();
try {
Response<BinaryData> response = client.getDeviceWithResponse(
Configuration.getGlobalConfiguration().get("DEVICEUPDATE_DEVICE"),
null);
System.out.println(response.getValue());
} catch (HttpResponseException e) {
// 404 from the service: the requested device/update does not exist.
if (e.getResponse().getStatusCode() == 404) {
System.out.println("update does not exist");
}
}
} | class GetDeviceSample {
} | class GetDeviceSample {
} |
Got it. Sorry I am not Java person, I am just mostly doing copy/paste. :-( | public static void main(String[] args) {
DeviceManagementAsyncClient client = new DeviceManagementClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_ACCOUNT_ENDPOINT"))
.instanceId(Configuration.getGlobalConfiguration().get("AZURE_INSTANCE_ID"))
.credential(new DefaultAzureCredentialBuilder().build())
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.buildAsyncClient();
try {
Response<BinaryData> response = client.getDeviceWithResponse(
Configuration.getGlobalConfiguration().get("DEVICEUPDATE_DEVICE"),
null).block();
System.out.println(response.getValue());
} catch (HttpResponseException e) {
if (e.getResponse().getStatusCode() == 404) {
System.out.println("update does not exist");
}
}
} | null).block(); | public static void main(String[] args) {
DeviceManagementClient client = new DeviceManagementClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("AZURE_ACCOUNT_ENDPOINT"))
.instanceId(Configuration.getGlobalConfiguration().get("AZURE_INSTANCE_ID"))
.credential(new DefaultAzureCredentialBuilder().build())
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.buildClient();
try {
Response<BinaryData> response = client.getDeviceWithResponse(
Configuration.getGlobalConfiguration().get("DEVICEUPDATE_DEVICE"),
null);
System.out.println(response.getValue());
} catch (HttpResponseException e) {
if (e.getResponse().getStatusCode() == 404) {
System.out.println("update does not exist");
}
}
} | class GetDeviceSample {
} | class GetDeviceSample {
} |
Watch out around `withVirtualTime` as it doesn't always behave the best in a multi-threaded environment. From what I've seen in the past: > StepVerifier.withVirtualTime is a great way to mock execution through time, for example mocking 30 minutes of "running" with a simple API that takes milliseconds to complete. Unfortunately, the base overload uses a shared Scheduler for running which can result in states where the scheduler is shutdown or isn't instantiated when the test runs. So, if you need to use this API you can do one of two things: > > Pass an instance Scheduler instead of using the shared Scheduler. > > Annotated the test class with @Isolated and @Execution(ExecutionMode.SAME_THREAD). (more notes on these later) | void withRetryFluxEmitsItemsLaterThanTimeout() {
// Use an explicit VirtualTimeScheduler instance: the zero-arg withVirtualTime overload
// relies on a shared scheduler that can be shut down or not yet created when tests run
// concurrently, which makes this test flaky.
final String timeoutMessage = "Operation timed out.";
final Duration timeout = Duration.ofSeconds(5);
final AmqpRetryOptions options = new AmqpRetryOptions()
    .setDelay(Duration.ofSeconds(1))
    .setMaxRetries(2)
    .setTryTimeout(timeout);
final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds());
final AtomicInteger resubscribe = new AtomicInteger();
final TestPublisher<AmqpTransportType> singleItem = TestPublisher.create();
final Flux<AmqpTransportType> flux = singleItem.flux()
    .doOnSubscribe(s -> resubscribe.incrementAndGet());
final VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
try {
    StepVerifier.withVirtualTime(() -> RetryUtil.withRetry(flux, options, timeoutMessage),
        () -> virtualTimeScheduler, 1)
        .expectSubscription()
        .then(() -> singleItem.next(AmqpTransportType.AMQP_WEB_SOCKETS))
        .expectNext(AmqpTransportType.AMQP_WEB_SOCKETS)
        .expectNoEvent(totalWaitTime)
        .thenCancel()
        .verify();
} finally {
    // Dispose deterministically so other tests are unaffected.
    virtualTimeScheduler.dispose();
}
// The item arrived before the try-timeout, so no retry/resubscribe must happen.
assertEquals(1, resubscribe.get());
} | StepVerifier.withVirtualTime(() -> RetryUtil.withRetry(flux, options, timeoutMessage)) | void withRetryFluxEmitsItemsLaterThanTimeout() {
final String timeoutMessage = "Operation timed out.";
final Duration timeout = Duration.ofSeconds(5);
final AmqpRetryOptions options = new AmqpRetryOptions()
.setDelay(Duration.ofSeconds(1))
.setMaxRetries(2)
.setTryTimeout(timeout);
final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds());
final AtomicInteger resubscribe = new AtomicInteger();
final TestPublisher<AmqpTransportType> singleItem = TestPublisher.create();
final Flux<AmqpTransportType> flux = singleItem.flux()
.doOnSubscribe(s -> resubscribe.incrementAndGet());
// Explicit VirtualTimeScheduler instance: avoids the shared withVirtualTime scheduler,
// which can be shut down or missing when tests execute concurrently.
final VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
try {
StepVerifier.withVirtualTime(() -> RetryUtil.withRetry(flux, options, timeoutMessage),
() -> virtualTimeScheduler, 1)
.expectSubscription()
.then(() -> singleItem.next(AmqpTransportType.AMQP_WEB_SOCKETS))
.expectNext(AmqpTransportType.AMQP_WEB_SOCKETS)
.expectNoEvent(totalWaitTime)
.thenCancel()
.verify();
} finally {
// Deterministic cleanup of the virtual-time scheduler.
virtualTimeScheduler.dispose();
}
// The item arrived before the try-timeout, so exactly one subscription is expected.
assertEquals(1, resubscribe.get());
} | class RetryUtilTest {
// FIXED retry mode must map to FixedAmqpRetryPolicy.
@Test
void getCorrectModeFixed() {
final AmqpRetryOptions retryOptions = new AmqpRetryOptions()
.setMode(AmqpRetryMode.FIXED);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
Assertions.assertNotNull(retryPolicy);
assertEquals(FixedAmqpRetryPolicy.class, retryPolicy.getClass());
}
// EXPONENTIAL retry mode must map to ExponentialAmqpRetryPolicy.
@Test
void getCorrectModeExponential() {
final AmqpRetryOptions retryOptions = new AmqpRetryOptions()
.setMode(AmqpRetryMode.EXPONENTIAL);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
Assertions.assertNotNull(retryPolicy);
assertEquals(ExponentialAmqpRetryPolicy.class, retryPolicy.getClass());
}
/**
* Tests a retry that times out on a Flux.
*/
@Test
void withRetryFlux() {
final String timeoutMessage = "Operation timed out.";
final Duration timeout = Duration.ofMillis(1500);
final AmqpRetryOptions options = new AmqpRetryOptions()
.setDelay(Duration.ofSeconds(1))
.setMaxRetries(2)
.setTryTimeout(timeout);
final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds())
.plus(timeout);
final AtomicInteger resubscribe = new AtomicInteger();
// A Flux that never emits forces the try-timeout on every attempt.
final Flux<AmqpTransportType> neverFlux = Flux.<AmqpTransportType>never()
.doOnSubscribe(s -> resubscribe.incrementAndGet());
StepVerifier.create(RetryUtil.withRetry(neverFlux, options, timeoutMessage))
.expectSubscription()
.thenAwait(totalWaitTime)
.expectErrorSatisfies(error -> assertTrue(error.getCause() instanceof TimeoutException))
.verify();
// Initial subscription plus one resubscribe per retry.
assertEquals(options.getMaxRetries() + 1, resubscribe.get());
}
/**
 * Tests a retry that times out on a Mono.
 */
@Test
void withRetryMono() {
    // NOTE: an orphaned javadoc plus a second @Test annotation (left over from an
    // extracted Flux test) preceded this method; @Test is not repeatable, so the
    // duplicate annotation did not compile and has been removed.
    final String timeoutMessage = "Operation timed out.";
    final Duration timeout = Duration.ofMillis(500);
    final AmqpRetryOptions options = new AmqpRetryOptions()
        .setDelay(Duration.ofSeconds(1))
        .setMaxRetries(2)
        .setTryTimeout(timeout);
    final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds());
    final AtomicInteger resubscribe = new AtomicInteger();
    // A Mono that never completes forces the try-timeout on every attempt.
    final Mono<AmqpTransportType> neverFlux = TestPublisher.<AmqpTransportType>create().mono()
        .doOnSubscribe(s -> resubscribe.incrementAndGet());
    StepVerifier.create(RetryUtil.withRetry(neverFlux, options, timeoutMessage))
        .expectSubscription()
        .thenAwait(totalWaitTime)
        .expectErrorSatisfies(error -> assertTrue(error.getCause() instanceof TimeoutException))
        .verify();
    // Initial subscription plus one resubscribe per retry.
    assertEquals(options.getMaxRetries() + 1, resubscribe.get());
}
// Arguments for the transient-error test: a retriable AmqpException and a TimeoutException.
static Stream<Throwable> withTransientError() {
return Stream.of(
new AmqpException(true, "Test-exception", new AmqpErrorContext("test-ns")),
new TimeoutException("Test-timeout")
);
}
@ParameterizedTest
@MethodSource
void withTransientError(Throwable transientError) {
final String timeoutMessage = "Operation timed out.";
final Duration timeout = Duration.ofSeconds(30);
final AmqpRetryOptions options = new AmqpRetryOptions()
.setMode(AmqpRetryMode.FIXED)
.setDelay(Duration.ofSeconds(1))
.setMaxRetries(1)
.setTryTimeout(timeout);
// First subscription errors with the transient error; the resubscription succeeds.
final AtomicBoolean wasSent = new AtomicBoolean();
final Flux<Integer> stream = Flux.concat(
Flux.just(0, 1),
Flux.create(sink -> {
if (wasSent.getAndSet(true)) {
sink.next(10);
sink.complete();
} else {
sink.error(transientError);
}
}),
Flux.just(3, 4));
// 0, 1 are replayed by the retry, then 10 and 3, 4 complete the stream.
StepVerifier.create(RetryUtil.withRetry(stream, options, timeoutMessage))
.expectNext(0, 1)
.expectNext(0, 1)
.expectNext(10)
.expectNext(3, 4)
.expectComplete()
.verify();
}
// Arguments for the non-transient-error test: a non-retriable AmqpException and an IllegalStateException.
static Stream<Throwable> withNonTransientError() {
return Stream.of(
new AmqpException(false, "Test-exception", new AmqpErrorContext("test-ns")),
new IllegalStateException("Some illegal State")
);
}
@ParameterizedTest
@MethodSource
void withNonTransientError(Throwable nonTransientError) {
final String timeoutMessage = "Operation timed out.";
final Duration timeout = Duration.ofSeconds(30);
final AmqpRetryOptions options = new AmqpRetryOptions()
.setMode(AmqpRetryMode.FIXED)
.setDelay(Duration.ofSeconds(1))
.setMaxRetries(1)
.setTryTimeout(timeout);
final Flux<Integer> stream = Flux.concat(
Flux.just(0, 1, 2),
Flux.error(nonTransientError),
Flux.just(3, 4));
// A non-transient error must surface immediately: no retry, and 3, 4 never emit.
StepVerifier.create(RetryUtil.withRetry(stream, options, timeoutMessage))
.expectNext(0, 1, 2)
.expectErrorMatches(error -> error.equals(nonTransientError))
.verify();
}
// FIXED and EXPONENTIAL option sets used to verify mapping to a Reactor retry spec.
static Stream<AmqpRetryOptions> createRetry() {
final AmqpRetryOptions fixed = new AmqpRetryOptions()
.setMode(AmqpRetryMode.FIXED)
.setDelay(Duration.ofSeconds(10))
.setMaxRetries(2)
.setMaxDelay(Duration.ofSeconds(90));
final AmqpRetryOptions exponential = new AmqpRetryOptions()
.setMode(AmqpRetryMode.EXPONENTIAL)
.setDelay(Duration.ofSeconds(5))
.setMaxRetries(5)
.setMaxDelay(Duration.ofSeconds(35));
return Stream.of(fixed, exponential);
}
/**
* Verifies retry options are correctly mapped to a retry spec.
*/
@MethodSource
@ParameterizedTest
void createRetry(AmqpRetryOptions options) {
final Retry actual = RetryUtil.createRetry(options);
assertTrue(actual instanceof RetryBackoffSpec);
final RetryBackoffSpec retrySpec = (RetryBackoffSpec) actual;
assertEquals(options.getMaxRetries(), retrySpec.maxAttempts);
assertEquals(options.getMaxDelay(), retrySpec.maxBackoff);
// The spec's minBackoff is expected to exceed the configured delay (see RetryUtil.createRetry).
assertTrue(options.getDelay().compareTo(retrySpec.minBackoff) < 0);
assertTrue(retrySpec.jitterFactor > 0);
}
// (throwable, expected-retriable?) pairs for the error-filter test.
static Stream<Arguments> retryFilter() {
return Stream.of(
Arguments.of(new TimeoutException("Something"), true),
Arguments.of(new AmqpException(true, "foo message", new AmqpErrorContext("test-namespace")), true),
Arguments.of(new AmqpException(false, "foo message", new AmqpErrorContext("test-ns")), false),
Arguments.of(new IllegalArgumentException("invalid"), false)
);
}
@MethodSource
@ParameterizedTest
void retryFilter(Throwable throwable, boolean expected) {
final AmqpRetryOptions options = new AmqpRetryOptions().setMode(AmqpRetryMode.EXPONENTIAL);
final Retry retry = RetryUtil.createRetry(options);
assertTrue(retry instanceof RetryBackoffSpec);
final RetryBackoffSpec retrySpec = (RetryBackoffSpec) retry;
// Only transient AmqpExceptions and TimeoutExceptions should pass the retry filter.
final Predicate<Throwable> errorFilter = retrySpec.errorFilter;
final boolean actual = errorFilter.test(throwable);
assertEquals(expected, actual);
}
} | class RetryUtilTest {
// FIXED retry mode must map to FixedAmqpRetryPolicy.
@Test
void getCorrectModeFixed() {
final AmqpRetryOptions retryOptions = new AmqpRetryOptions()
.setMode(AmqpRetryMode.FIXED);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
Assertions.assertNotNull(retryPolicy);
assertEquals(FixedAmqpRetryPolicy.class, retryPolicy.getClass());
}
// EXPONENTIAL retry mode must map to ExponentialAmqpRetryPolicy.
@Test
void getCorrectModeExponential() {
final AmqpRetryOptions retryOptions = new AmqpRetryOptions()
.setMode(AmqpRetryMode.EXPONENTIAL);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
Assertions.assertNotNull(retryPolicy);
assertEquals(ExponentialAmqpRetryPolicy.class, retryPolicy.getClass());
}
/**
* Tests a retry that times out on a Flux.
*/
@Test
void withRetryFlux() {
final String timeoutMessage = "Operation timed out.";
final Duration timeout = Duration.ofMillis(1500);
final AmqpRetryOptions options = new AmqpRetryOptions()
.setDelay(Duration.ofSeconds(1))
.setMaxRetries(2)
.setTryTimeout(timeout);
final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds())
.plus(timeout);
final AtomicInteger resubscribe = new AtomicInteger();
// A Flux that never emits forces the try-timeout on every attempt.
final Flux<AmqpTransportType> neverFlux = Flux.<AmqpTransportType>never()
.doOnSubscribe(s -> resubscribe.incrementAndGet());
StepVerifier.create(RetryUtil.withRetry(neverFlux, options, timeoutMessage))
.expectSubscription()
.thenAwait(totalWaitTime)
.expectErrorSatisfies(error -> assertTrue(error.getCause() instanceof TimeoutException))
.verify();
// Initial subscription plus one resubscribe per retry.
assertEquals(options.getMaxRetries() + 1, resubscribe.get());
}
/**
 * Tests a retry that times out on a Mono.
 */
@Test
void withRetryMono() {
    // NOTE: an orphaned javadoc plus a second @Test annotation (left over from an
    // extracted Flux test) preceded this method; @Test is not repeatable, so the
    // duplicate annotation did not compile and has been removed.
    final String timeoutMessage = "Operation timed out.";
    final Duration timeout = Duration.ofMillis(500);
    final AmqpRetryOptions options = new AmqpRetryOptions()
        .setDelay(Duration.ofSeconds(1))
        .setMaxRetries(2)
        .setTryTimeout(timeout);
    final Duration totalWaitTime = Duration.ofSeconds(options.getMaxRetries() * options.getDelay().getSeconds());
    final AtomicInteger resubscribe = new AtomicInteger();
    // A Mono that never completes forces the try-timeout on every attempt.
    final Mono<AmqpTransportType> neverFlux = TestPublisher.<AmqpTransportType>create().mono()
        .doOnSubscribe(s -> resubscribe.incrementAndGet());
    StepVerifier.create(RetryUtil.withRetry(neverFlux, options, timeoutMessage))
        .expectSubscription()
        .thenAwait(totalWaitTime)
        .expectErrorSatisfies(error -> assertTrue(error.getCause() instanceof TimeoutException))
        .verify();
    // Initial subscription plus one resubscribe per retry.
    assertEquals(options.getMaxRetries() + 1, resubscribe.get());
}
// Arguments for the transient-error test: a retriable AmqpException and a TimeoutException.
static Stream<Throwable> withTransientError() {
return Stream.of(
new AmqpException(true, "Test-exception", new AmqpErrorContext("test-ns")),
new TimeoutException("Test-timeout")
);
}
@ParameterizedTest
@MethodSource
void withTransientError(Throwable transientError) {
final String timeoutMessage = "Operation timed out.";
final Duration timeout = Duration.ofSeconds(30);
final AmqpRetryOptions options = new AmqpRetryOptions()
.setMode(AmqpRetryMode.FIXED)
.setDelay(Duration.ofSeconds(1))
.setMaxRetries(1)
.setTryTimeout(timeout);
// First subscription errors with the transient error; the resubscription succeeds.
final AtomicBoolean wasSent = new AtomicBoolean();
final Flux<Integer> stream = Flux.concat(
Flux.just(0, 1),
Flux.create(sink -> {
if (wasSent.getAndSet(true)) {
sink.next(10);
sink.complete();
} else {
sink.error(transientError);
}
}),
Flux.just(3, 4));
// 0, 1 are replayed by the retry, then 10 and 3, 4 complete the stream.
StepVerifier.create(RetryUtil.withRetry(stream, options, timeoutMessage))
.expectNext(0, 1)
.expectNext(0, 1)
.expectNext(10)
.expectNext(3, 4)
.expectComplete()
.verify();
}
// Arguments for the non-transient-error test: a non-retriable AmqpException and an IllegalStateException.
static Stream<Throwable> withNonTransientError() {
return Stream.of(
new AmqpException(false, "Test-exception", new AmqpErrorContext("test-ns")),
new IllegalStateException("Some illegal State")
);
}
@ParameterizedTest
@MethodSource
void withNonTransientError(Throwable nonTransientError) {
final String timeoutMessage = "Operation timed out.";
final Duration timeout = Duration.ofSeconds(30);
final AmqpRetryOptions options = new AmqpRetryOptions()
.setMode(AmqpRetryMode.FIXED)
.setDelay(Duration.ofSeconds(1))
.setMaxRetries(1)
.setTryTimeout(timeout);
// defer() keeps each section cold so nothing is evaluated before subscription.
final Flux<Integer> stream = Flux.concat(
Flux.defer(() -> Flux.just(0, 1, 2)),
Flux.defer(() -> Flux.error(nonTransientError)),
Flux.defer(() -> Flux.just(3, 4)));
// Explicit VirtualTimeScheduler instance; disposed in finally for deterministic cleanup.
final VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create(true);
try {
StepVerifier.withVirtualTime(() -> RetryUtil.withRetry(stream, options, timeoutMessage),
() -> virtualTimeScheduler, 4)
.expectNext(0, 1, 2)
.expectErrorMatches(error -> error.equals(nonTransientError))
.verify();
} finally {
virtualTimeScheduler.dispose();
}
}
// FIXED and EXPONENTIAL option sets used to verify mapping to a Reactor retry spec.
static Stream<AmqpRetryOptions> createRetry() {
final AmqpRetryOptions fixed = new AmqpRetryOptions()
.setMode(AmqpRetryMode.FIXED)
.setDelay(Duration.ofSeconds(10))
.setMaxRetries(2)
.setMaxDelay(Duration.ofSeconds(90));
final AmqpRetryOptions exponential = new AmqpRetryOptions()
.setMode(AmqpRetryMode.EXPONENTIAL)
.setDelay(Duration.ofSeconds(5))
.setMaxRetries(5)
.setMaxDelay(Duration.ofSeconds(35));
return Stream.of(fixed, exponential);
}
/**
* Verifies retry options are correctly mapped to a retry spec.
*/
@MethodSource
@ParameterizedTest
void createRetry(AmqpRetryOptions options) {
final Retry actual = RetryUtil.createRetry(options);
assertTrue(actual instanceof RetryBackoffSpec);
final RetryBackoffSpec retrySpec = (RetryBackoffSpec) actual;
assertEquals(options.getMaxRetries(), retrySpec.maxAttempts);
assertEquals(options.getMaxDelay(), retrySpec.maxBackoff);
// The spec's minBackoff is expected to exceed the configured delay (see RetryUtil.createRetry).
assertTrue(options.getDelay().compareTo(retrySpec.minBackoff) < 0);
assertTrue(retrySpec.jitterFactor > 0);
}
// (throwable, expected-retriable?) pairs for the error-filter test.
static Stream<Arguments> retryFilter() {
return Stream.of(
Arguments.of(new TimeoutException("Something"), true),
Arguments.of(new AmqpException(true, "foo message", new AmqpErrorContext("test-namespace")), true),
Arguments.of(new AmqpException(false, "foo message", new AmqpErrorContext("test-ns")), false),
Arguments.of(new IllegalArgumentException("invalid"), false)
);
}
@MethodSource
@ParameterizedTest
void retryFilter(Throwable throwable, boolean expected) {
final AmqpRetryOptions options = new AmqpRetryOptions().setMode(AmqpRetryMode.EXPONENTIAL);
final Retry retry = RetryUtil.createRetry(options);
assertTrue(retry instanceof RetryBackoffSpec);
final RetryBackoffSpec retrySpec = (RetryBackoffSpec) retry;
// Only transient AmqpExceptions and TimeoutExceptions should pass the retry filter.
final Predicate<Throwable> errorFilter = retrySpec.errorFilter;
final boolean actual = errorFilter.test(throwable);
assertEquals(expected, actual);
}
} |
container.asyncContainer -> since the asyncContainer is not public, so it can only be accessed under the com.azure.cosmos package, but customer's code generally is under a different package, so it might cause issue? Wonder for the sync API, whether we should introduce a timeout public API within SDK which use the same strategy underlying | public void readItemWithSoftTimeoutAndFallback() throws Exception {
String pk = UUID.randomUUID().toString();
String id = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(id, pk);
ObjectNode fallBackProperties = getDocumentDefinition("justFallback", "justFallback");
container.createItem(properties);
// Generous soft timeout: the real read should win and return the stored id.
// NOTE(review): this reaches into the package-private asyncContainer field; fine for a
// test in com.azure.cosmos, but customer code in another package could not do this --
// consider exposing a public timeout API with the same underlying strategy.
String successfulResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofDays(3),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(successfulResponse).isEqualTo(id);
// Tiny soft timeout: the fallback document should be returned instead.
String timedOutResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofNanos(10),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(timedOutResponse).isEqualTo("justFallback");
// NOTE(review): presumably lets the timed-out background read finish logging -- confirm.
Thread.sleep(1000);
} | .asyncContainer | public void readItemWithSoftTimeoutAndFallback() throws Exception {
String pk = UUID.randomUUID().toString();
String id = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(id, pk);
ObjectNode fallBackProperties = getDocumentDefinition("justFallback", "justFallback");
container.createItem(properties);
// Generous soft timeout: the real read completes first and yields the stored id.
String successfulResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofDays(3),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(successfulResponse).isEqualTo(id);
// Tiny soft timeout: the helper must fall back to the substitute document.
String timedOutResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofNanos(10),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(timedOutResponse).isEqualTo("justFallback");
// NOTE(review): presumably gives the backgrounded read time to complete -- confirm.
Thread.sleep(1000);
} | class CosmosItemTest extends TestSuiteBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private CosmosClient client;
private CosmosContainer container;
@Factory(dataProvider = "clientBuildersWithDirectSession")
public CosmosItemTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildClient();
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
// Creates items with and without request options and validates both responses.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItem() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
// A successful write reports a positive request charge.
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(properties, itemResponse);
properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
validateItemResponse(properties, itemResponse1);
}
// Creating an item whose id already exists must fail with CONFLICT (409).
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void createItem_alreadyExists() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
    validateItemResponse(properties, itemResponse);
    properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
    validateItemResponse(properties, itemResponse1);
    try {
        container.createItem(properties, new CosmosItemRequestOptions());
        // Previously the test passed silently when no exception was thrown.
        // AssertionError is not an Exception, so the catch below cannot swallow it.
        throw new AssertionError("Expected CONFLICT when creating an item that already exists");
    } catch (Exception e) {
        assertThat(e).isInstanceOf(CosmosException.class);
        assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT);
    }
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createLargeItem() throws Exception {
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
// ~1.5 MB payload exercises the large-document write path.
int size = (int) (ONE_MB * 1.5);
BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size));
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItemWithVeryLargePartitionKey() throws Exception {
    // Build a long partition-key value of the form "0x1x2x...99x".
    StringBuilder partitionKeyValue = new StringBuilder();
    int index = 0;
    while (index < 100) {
        partitionKeyValue.append(index).append("x");
        index++;
    }
    InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
    BridgeInternal.setProperty(docDefinition, "mypk", partitionKeyValue.toString());
    // Creation must succeed even with a very large partition key.
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
    validateItemResponse(docDefinition, itemResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithVeryLargePartitionKey() throws Exception {
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
// Long partition-key value of the form "0x1x2x...99x".
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 100; i++) {
sb.append(i).append("x");
}
BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition);
// Give replicas a chance to catch up before the read, per the configured consistency.
waitIfNeededForReplicasToCatchUp(getClientBuilder());
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(docDefinition.getId(),
new PartitionKey(sb.toString()), options,
InternalObjectNode.class);
validateItemResponse(docDefinition, readResponse);
}
// Round-trips a document: create, then point-read by id and partition key.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItem() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemResponse<InternalObjectNode> readResponse1 = container.readItem(properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class);
validateItemResponse(properties, readResponse1);
}
/**
 * Subscribes to {@code source} on a bounded-elastic scheduler and completes with its item.
 * If {@code softTimeout} elapses first, {@code fallback} is returned instead while the
 * original call is left to run to completion in the background, logging its outcome.
 *
 * <p>NOTE: this is a helper, not a test; the stray {@code @Test} annotation that preceded
 * it (leftover from an extracted test method) has been removed so TestNG does not attempt
 * to execute it as a test case.</p>
 */
static <T> Mono<T> wrapWithSoftTimeoutAndFallback(
    Mono<CosmosItemResponse<T>> source,
    Duration softTimeout,
    T fallback) {
    AtomicBoolean timeoutElapsed = new AtomicBoolean(false);
    return Mono
        .<T>create(sink -> {
            source
                .subscribeOn(Schedulers.boundedElastic())
                .subscribe(
                    response -> {
                        if (timeoutElapsed.get()) {
                            logger.warn(
                                "COMPLETED SUCCESSFULLY after timeout elapsed. Diagnostics: {}",
                                response.getDiagnostics().toString());
                        } else {
                            logger.info("COMPLETED SUCCESSFULLY");
                        }
                        sink.success(response.getItem());
                    },
                    error -> {
                        final Throwable unwrappedException = Exceptions.unwrap(error);
                        if (unwrappedException instanceof CosmosException) {
                            final CosmosException cosmosException = (CosmosException) unwrappedException;
                            logger.error(
                                "COMPLETED WITH COSMOS FAILURE. Diagnostics: {}",
                                cosmosException.getDiagnostics() != null ?
                                    cosmosException.getDiagnostics().toString() : "n/a",
                                cosmosException);
                        } else {
                            logger.error("COMPLETED WITH GENERIC FAILURE", error);
                        }
                        if (timeoutElapsed.get()) {
                            // Fallback already delivered; swallow the late failure.
                            sink.success();
                        } else {
                            sink.error(error);
                        }
                    }
                );
        })
        .timeout(softTimeout)
        .onErrorResume(error -> {
            timeoutElapsed.set(true);
            return Mono.just(fallback);
        });
}
// An EVENTUAL-consistency read must succeed even with a deliberately invalid session token.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithEventualConsistency() throws Exception {
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
String idAndPkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
CosmosItemResponse<ObjectNode> readResponse1 = container.readItem(
idAndPkValue,
new PartitionKey(idAndPkValue),
new CosmosItemRequestOptions()
// The bogus session token must be ignored when consistency is EVENTUAL.
.setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
.setConsistencyLevel(ConsistencyLevel.EVENTUAL),
ObjectNode.class);
logger.info("REQUEST DIAGNOSTICS: {}", readResponse1.getDiagnostics().toString());
validateIdOfItemResponse(idAndPkValue, readResponse1);
}
// Replaces an existing item and verifies the newly added property round-trips.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replaceItem() throws Exception{
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
validateItemResponse(properties, itemResponse);
String newPropValue = UUID.randomUUID().toString();
BridgeInternal.setProperty(properties, "newProp", newPropValue);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(options, new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")));
CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(properties,
properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
options);
assertThat(ModelBridgeInternal.getObjectFromJsonSerializable(BridgeInternal.getProperties(replace), "newProp")).isEqualTo(newPropValue);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItem() throws Exception {
    // Create a document, then delete it by id + partition key.
    InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(document);
    PartitionKey partitionKey =
        new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(document, "mypk"));
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
    CosmosItemResponse<?> deleteResponse =
        container.deleteItem(document.getId(), partitionKey, requestOptions);
    // A successful delete returns HTTP 204 (No Content).
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
// Deletes an item by passing the entity itself instead of id + partition key.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItemUsingEntity() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(itemResponse.getItem(), options);
// A successful delete returns HTTP 204 (No Content).
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
// After at least one create, readAllItems must yield a non-empty iterable.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItems() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
container.readAllItems(cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
// Queries by id using both the raw-string and SqlQuerySpec overloads.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItems() throws Exception{
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
container.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
// Verifies a caller-supplied correlation activity id flows through query responses
// and shows up in the request diagnostics.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithCustomCorrelationActivityId() throws Exception{
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    UUID correlationId = UUID.randomUUID();
    // Inject the custom correlation id through the internal accessor.
    ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .setCorrelationActivityId(cosmosQueryRequestOptions, correlationId);
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
    feedResponseIterator1
        .iterableByPage()
        .forEach(response -> {
            // The original chains stopped at withFailMessage(...) and therefore never
            // asserted anything; each AssertJ chain must end in a real assertion, and
            // the id comparison uses equality rather than reference identity.
            assertThat(response.getCorrelationActivityId())
                .withFailMessage("response.getCorrelationActivityId")
                .isEqualTo(correlationId);
            assertThat(response.getCosmosDiagnostics().toString().contains(correlationId.toString()))
                .withFailMessage("response.getCosmosDiagnostics")
                .isTrue();
        });
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithEventualConsistency() throws Exception{
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
String idAndPkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
String query = String.format("SELECT * from c where c.id = '%s'", idAndPkValue);
CosmosQueryRequestOptions cosmosQueryRequestOptions =
new CosmosQueryRequestOptions()
.setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, ObjectNode.class);
feedResponseIterator1.handle(
(r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
assertThat(feedResponseIterator1.stream().count() == 1);
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
container.queryItems(querySpec, cosmosQueryRequestOptions, ObjectNode.class);
feedResponseIterator3.handle(
(r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
assertThat(feedResponseIterator3.stream().count() == 1);
}
    // Verifies page-by-page draining of a query: with pageSize=1 and three matching
    // documents, following continuation tokens must visit exactly three full pages.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryItemsWithContinuationTokenAndPageSize() throws Exception{
        List<String> actualIds = new ArrayList<>();
        // Seed three documents and remember their ids for the IN query below.
        InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(properties);
        actualIds.add(properties.getId());
        properties = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(properties);
        actualIds.add(properties.getId());
        properties = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(properties);
        actualIds.add(properties.getId());
        String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2));
        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        String continuationToken = null;
        int pageSize = 1;
        int initialDocumentCount = 3;
        int finalDocumentCount = 0;
        CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
            container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
        // Drain the feed one page at a time; a null continuation token signals the last page.
        do {
            Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
                feedResponseIterator1.iterableByPage(continuationToken, pageSize);
            for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                // Every page must be exactly pageSize (3 docs / pageSize 1 divides evenly).
                assertThat(resultSize).isEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while(continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
    }
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartition() throws Exception{
String pkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
ObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
ObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
    // Verifies page-by-page draining of a single logical partition: with pageSize=1
    // and three documents sharing one partition key, following continuation tokens
    // must visit exactly three full pages.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void readAllItemsOfLogicalPartitionWithContinuationTokenAndPageSize() throws Exception{
        String pkValue = UUID.randomUUID().toString();
        List<String> actualIds = new ArrayList<>();
        // Seed three documents into the same logical partition (same pkValue).
        ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
        container.createItem(properties);
        properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
        container.createItem(properties);
        properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
        container.createItem(properties);
        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        String continuationToken = null;
        int pageSize = 1;
        int initialDocumentCount = 3;
        int finalDocumentCount = 0;
        CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
            container.readAllItems(
                new PartitionKey(pkValue),
                cosmosQueryRequestOptions,
                InternalObjectNode.class);
        // Drain the feed one page at a time; a null continuation token signals the last page.
        do {
            Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
                feedResponseIterator1.iterableByPage(continuationToken, pageSize);
            for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                // Every page must be exactly pageSize (3 docs / pageSize 1 divides evenly).
                assertThat(resultSize).isEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while(continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
    }
private InternalObjectNode getDocumentDefinition(String documentId) {
final String uuid = UUID.randomUUID().toString();
final InternalObjectNode properties =
new InternalObjectNode(String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, documentId, uuid));
return properties;
}
private ObjectNode getDocumentDefinition(String documentId, String pkId) throws JsonProcessingException {
String json = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, documentId, pkId);
return
OBJECT_MAPPER.readValue(json, ObjectNode.class);
}
    // Asserts that the service echoed back a resource whose id matches the request payload.
    private void validateItemResponse(InternalObjectNode containerProperties,
                                      CosmosItemResponse<InternalObjectNode> createResponse) {
        assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull();
        assertThat(BridgeInternal.getProperties(createResponse).getId())
            .as("check Resource Id")
            .isEqualTo(containerProperties.getId());
    }
    // Asserts that the response carries a non-null resource id equal to the expected id.
    private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) {
        assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull();
        assertThat(BridgeInternal.getProperties(createResponse).getId())
            .as("check Resource Id")
            .isEqualTo(expectedId);
    }
} | class CosmosItemTest extends TestSuiteBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private CosmosClient client;
private CosmosContainer container;
    // TestNG factory: instantiates this suite once per configured client builder
    // (direct-mode + session-consistency permutations supplied by the data provider).
    @Factory(dataProvider = "clientBuildersWithDirectSession")
    public CosmosItemTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }
    // One-time setup: builds the sync client and binds 'container' to the shared
    // multi-partition container. The null check guards against double initialization.
    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void before_CosmosItemTest() {
        assertThat(this.client).isNull();
        this.client = getClientBuilder().buildClient();
        CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
        container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
    }
    // One-time teardown: releases the client built in before_CosmosItemTest.
    // alwaysRun ensures cleanup even when setup-dependent tests were skipped.
    @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        assertThat(this.client).isNotNull();
        this.client.close();
    }
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItem() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(properties, itemResponse);
properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
validateItemResponse(properties, itemResponse1);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void createItem_alreadyExists() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
validateItemResponse(properties, itemResponse);
properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
validateItemResponse(properties, itemResponse1);
try {
container.createItem(properties, new CosmosItemRequestOptions());
} catch (Exception e) {
assertThat(e).isInstanceOf(CosmosException.class);
assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT);
}
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createLargeItem() throws Exception {
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
int size = (int) (ONE_MB * 1.5);
BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size));
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItemWithVeryLargePartitionKey() throws Exception {
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 100; i++) {
sb.append(i).append("x");
}
BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
    // Round-trips a document whose partition-key value is oversized ("0x1x...99x"):
    // create it, wait for replicas, then point-read it back with the same key.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void readItemWithVeryLargePartitionKey() throws Exception {
        InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
        StringBuilder sb = new StringBuilder();
        for(int i = 0; i < 100; i++) {
            sb.append(i).append("x");
        }
        BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition);
        // Give secondaries a chance to catch up before the read (consistency-dependent).
        waitIfNeededForReplicasToCatchUp(getClientBuilder());
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(docDefinition.getId(),
                                                                                    new PartitionKey(sb.toString()), options,
                                                                                    InternalObjectNode.class);
        validateItemResponse(docDefinition, readResponse);
    }
    // Point-reads a freshly created document by id + partition key and verifies
    // the returned resource id matches what was written.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void readItem() throws Exception {
        InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
        CosmosItemResponse<InternalObjectNode> readResponse1 = container.readItem(properties.getId(),
                                                                                    new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
                                                                                    new CosmosItemRequestOptions(),
                                                                                    InternalObjectNode.class);
        validateItemResponse(properties, readResponse1);
    }
@Test(groups = { "simple" }, timeOut = TIMEOUT)
static <T> Mono<T> wrapWithSoftTimeoutAndFallback(
Mono<CosmosItemResponse<T>> source,
Duration softTimeout,
T fallback) {
AtomicBoolean timeoutElapsed = new AtomicBoolean(false);
return Mono
.<T>create(sink -> {
source
.subscribeOn(Schedulers.boundedElastic())
.subscribe(
response -> {
if (timeoutElapsed.get()) {
logger.warn(
"COMPLETED SUCCESSFULLY after timeout elapsed. Diagnostics: {}",
response.getDiagnostics().toString());
} else {
logger.info("COMPLETED SUCCESSFULLY");
}
sink.success(response.getItem());
},
error -> {
final Throwable unwrappedException = Exceptions.unwrap(error);
if (unwrappedException instanceof CosmosException) {
final CosmosException cosmosException = (CosmosException) unwrappedException;
logger.error(
"COMPLETED WITH COSMOS FAILURE. Diagnostics: {}",
cosmosException.getDiagnostics() != null ?
cosmosException.getDiagnostics().toString() : "n/a",
cosmosException);
} else {
logger.error("COMPLETED WITH GENERIC FAILURE", error);
}
if (timeoutElapsed.get()) {
sink.success();
} else {
sink.error(error);
}
}
);
})
.timeout(softTimeout)
.onErrorResume(error -> {
timeoutElapsed.set(true);
return Mono.just(fallback);
});
}
    // Point-reads with EVENTUAL consistency while supplying a deliberately invalid,
    // oversized session token: EVENTUAL must ignore the session token, so the read
    // still succeeds and returns the expected document.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void readItemWithEventualConsistency() throws Exception {
        CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
        container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
        // Use the same value for id and partition key so the read needs only one string.
        String idAndPkValue = UUID.randomUUID().toString();
        ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
        CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
        CosmosItemResponse<ObjectNode> readResponse1 = container.readItem(
            idAndPkValue,
            new PartitionKey(idAndPkValue),
            new CosmosItemRequestOptions()
                .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
                .setConsistencyLevel(ConsistencyLevel.EVENTUAL),
            ObjectNode.class);
        logger.info("REQUEST DIAGNOSTICS: {}", readResponse1.getDiagnostics().toString());
        validateIdOfItemResponse(idAndPkValue, readResponse1);
    }
    // Creates a document, adds a new property, replaces the document in place, and
    // verifies the replacement response carries the added property value.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void replaceItem() throws Exception{
        InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
        validateItemResponse(properties, itemResponse);
        String newPropValue = UUID.randomUUID().toString();
        BridgeInternal.setProperty(properties, "newProp", newPropValue);
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        ModelBridgeInternal.setPartitionKey(options, new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")));
        // Replace with the same id and partition key so only the body changes.
        CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(properties,
                                                                      properties.getId(),
                                                                      new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
                                                                      options);
        assertThat(ModelBridgeInternal.getObjectFromJsonSerializable(BridgeInternal.getProperties(replace), "newProp")).isEqualTo(newPropValue);
    }
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItem() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
options);
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
    // Deletes a freshly created document by passing the returned entity (rather than
    // id + partition key) and expects HTTP 204 No Content.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void deleteItemUsingEntity() throws Exception {
        InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        CosmosItemResponse<?> deleteResponse = container.deleteItem(itemResponse.getItem(), options);
        assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
    }
    // Seeds one document, then verifies the read-all feed is non-empty.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void readAllItems() throws Exception {
        InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
            container.readAllItems(cosmosQueryRequestOptions, InternalObjectNode.class);
        assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
    }
    // Creates one document, then queries for it both via a raw query string and via
    // an equivalent SqlQuerySpec; both forms must return a non-empty result.
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryItems() throws Exception{
        InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
        String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
            container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
        assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
            container.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class);
        assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
    }
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithCustomCorrelationActivityId() throws Exception{
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
UUID correlationId = UUID.randomUUID();
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.setCorrelationActivityId(cosmosQueryRequestOptions, correlationId);
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
feedResponseIterator1
.iterableByPage()
.forEach(response -> {
assertThat(response.getCorrelationActivityId() == correlationId)
.withFailMessage("response.getCorrelationActivityId");
assertThat(response.getCosmosDiagnostics().toString().contains(correlationId.toString()))
.withFailMessage("response.getCosmosDiagnostics");
});
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithEventualConsistency() throws Exception{
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
String idAndPkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
String query = String.format("SELECT * from c where c.id = '%s'", idAndPkValue);
CosmosQueryRequestOptions cosmosQueryRequestOptions =
new CosmosQueryRequestOptions()
.setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, ObjectNode.class);
feedResponseIterator1.handle(
(r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
assertThat(feedResponseIterator1.stream().count() == 1);
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
container.queryItems(querySpec, cosmosQueryRequestOptions, ObjectNode.class);
feedResponseIterator3.handle(
(r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
assertThat(feedResponseIterator3.stream().count() == 1);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() throws Exception{
List<String> actualIds = new ArrayList<>();
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
String continuationToken = null;
int pageSize = 1;
int initialDocumentCount = 3;
int finalDocumentCount = 0;
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
do {
Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
feedResponseIterator1.iterableByPage(continuationToken, pageSize);
for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while(continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartition() throws Exception{
String pkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
ObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
ObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartitionWithContinuationTokenAndPageSize() throws Exception{
String pkValue = UUID.randomUUID().toString();
List<String> actualIds = new ArrayList<>();
ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
container.createItem(properties);
properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
container.createItem(properties);
properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
String continuationToken = null;
int pageSize = 1;
int initialDocumentCount = 3;
int finalDocumentCount = 0;
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
InternalObjectNode.class);
do {
Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
feedResponseIterator1.iterableByPage(continuationToken, pageSize);
for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while(continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
private InternalObjectNode getDocumentDefinition(String documentId) {
final String uuid = UUID.randomUUID().toString();
final InternalObjectNode properties =
new InternalObjectNode(String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, documentId, uuid));
return properties;
}
private ObjectNode getDocumentDefinition(String documentId, String pkId) throws JsonProcessingException {
String json = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, documentId, pkId);
return
OBJECT_MAPPER.readValue(json, ObjectNode.class);
}
private void validateItemResponse(InternalObjectNode containerProperties,
CosmosItemResponse<InternalObjectNode> createResponse) {
assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull();
assertThat(BridgeInternal.getProperties(createResponse).getId())
.as("check Resource Id")
.isEqualTo(containerProperties.getId());
}
private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) {
assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull();
assertThat(BridgeInternal.getProperties(createResponse).getId())
.as("check Resource Id")
.isEqualTo(expectedId);
}
} |
We can evaluate - but should chat with the Central Team before spending any time on it. My guess is they will block this. If Theo adds a public sample it should use CosmosAsyncContainer - just used this here because we don't have a CosmosItemAsyncTest class. | public void readItemWithSoftTimeoutAndFallback() throws Exception {
String pk = UUID.randomUUID().toString();
String id = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(id, pk);
ObjectNode fallBackProperties = getDocumentDefinition("justFallback", "justFallback");
container.createItem(properties);
String successfulResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofDays(3),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(successfulResponse).isEqualTo(id);
String timedOutResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofNanos(10),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(timedOutResponse).isEqualTo("justFallback");
Thread.sleep(1000);
} | .asyncContainer | public void readItemWithSoftTimeoutAndFallback() throws Exception {
String pk = UUID.randomUUID().toString();
String id = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(id, pk);
ObjectNode fallBackProperties = getDocumentDefinition("justFallback", "justFallback");
container.createItem(properties);
String successfulResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofDays(3),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(successfulResponse).isEqualTo(id);
String timedOutResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofNanos(10),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(timedOutResponse).isEqualTo("justFallback");
Thread.sleep(1000);
} | class CosmosItemTest extends TestSuiteBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private CosmosClient client;
private CosmosContainer container;
@Factory(dataProvider = "clientBuildersWithDirectSession")
public CosmosItemTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildClient();
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItem() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(properties, itemResponse);
properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
validateItemResponse(properties, itemResponse1);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void createItem_alreadyExists() throws Exception {
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
validateItemResponse(properties, itemResponse);
properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
validateItemResponse(properties, itemResponse1);
try {
container.createItem(properties, new CosmosItemRequestOptions());
} catch (Exception e) {
assertThat(e).isInstanceOf(CosmosException.class);
assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT);
}
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createLargeItem() throws Exception {
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
int size = (int) (ONE_MB * 1.5);
BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size));
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItemWithVeryLargePartitionKey() throws Exception {
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 100; i++) {
sb.append(i).append("x");
}
BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
// Creates a document with a ~290-character partition key value, then
// point-reads it back with the same long key.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithVeryLargePartitionKey() throws Exception {
    InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
    StringBuilder sb = new StringBuilder();
    for(int i = 0; i < 100; i++) {
        sb.append(i).append("x");
    }
    BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition);

    // Let secondary replicas catch up before reading, to avoid flakiness on
    // weaker consistency configurations.
    waitIfNeededForReplicasToCatchUp(getClientBuilder());

    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(docDefinition.getId(),
        new PartitionKey(sb.toString()), options,
        InternalObjectNode.class);
    validateItemResponse(docDefinition, readResponse);
}
// Point-reads a freshly created document by id + partition key and checks that
// the returned id matches.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItem() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    CosmosItemResponse<InternalObjectNode> readResponse1 = container.readItem(properties.getId(),
        new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
        new CosmosItemRequestOptions(),
        InternalObjectNode.class);
    validateItemResponse(properties, readResponse1);
}
/**
 * Utility (not a test): subscribes to {@code source} and races it against a
 * soft timeout. If the timeout elapses first, {@code fallback} is emitted, but
 * the original subscription keeps running and its eventual outcome is logged.
 *
 * BUG FIX: removed the misplaced {@code @Test} annotation — this is a static,
 * parameterized helper returning {@code Mono}, not a TestNG test method; the
 * annotation would make TestNG try (and fail) to run it as a test.
 *
 * @param source      the item response to await
 * @param softTimeout how long to wait before emitting the fallback
 * @param fallback    value emitted when the soft timeout elapses
 * @return the item from {@code source}, or {@code fallback} after the timeout
 */
static <T> Mono<T> wrapWithSoftTimeoutAndFallback(
    Mono<CosmosItemResponse<T>> source,
    Duration softTimeout,
    T fallback) {

    // Flips to true once the soft timeout fired; late results are then only logged.
    AtomicBoolean timeoutElapsed = new AtomicBoolean(false);
    return Mono
        .<T>create(sink -> {
            source
                .subscribeOn(Schedulers.boundedElastic())
                .subscribe(
                    response -> {
                        if (timeoutElapsed.get()) {
                            logger.warn(
                                "COMPLETED SUCCESSFULLY after timeout elapsed. Diagnostics: {}",
                                response.getDiagnostics().toString());
                        } else {
                            logger.info("COMPLETED SUCCESSFULLY");
                        }
                        sink.success(response.getItem());
                    },
                    error -> {
                        final Throwable unwrappedException = Exceptions.unwrap(error);
                        if (unwrappedException instanceof CosmosException) {
                            final CosmosException cosmosException = (CosmosException) unwrappedException;
                            logger.error(
                                "COMPLETED WITH COSMOS FAILURE. Diagnostics: {}",
                                cosmosException.getDiagnostics() != null ?
                                    cosmosException.getDiagnostics().toString() : "n/a",
                                cosmosException);
                        } else {
                            logger.error("COMPLETED WITH GENERIC FAILURE", error);
                        }
                        if (timeoutElapsed.get()) {
                            // Fallback was already emitted; swallow the late failure.
                            sink.success();
                        } else {
                            sink.error(error);
                        }
                    }
                );
        })
        .timeout(softTimeout)
        // NOTE(review): this resumes on ANY error, not just the soft timeout —
        // preserved from the original; confirm that is intended.
        .onErrorResume(error -> {
            timeoutElapsed.set(true);
            return Mono.just(fallback);
        });
}
// Point-read with an oversized, deliberately invalid session token and
// EVENTUAL consistency; presumably verifies the bad token is ignored for
// eventual reads — the item must still come back. Rebinds 'container' to the
// shared multi-partition container.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithEventualConsistency() throws Exception {
    CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
    container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());

    String idAndPkValue = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
    CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);

    CosmosItemResponse<ObjectNode> readResponse1 = container.readItem(
        idAndPkValue,
        new PartitionKey(idAndPkValue),
        new CosmosItemRequestOptions()
            .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
            .setConsistencyLevel(ConsistencyLevel.EVENTUAL),
        ObjectNode.class);

    logger.info("REQUEST DIAGNOSTICS: {}", readResponse1.getDiagnostics().toString());
    validateIdOfItemResponse(idAndPkValue, readResponse1);
}
// Creates a document, adds a new property, replaces the document, and verifies
// the replaced document carries the new property value.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replaceItem() throws Exception{
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
    validateItemResponse(properties, itemResponse);

    String newPropValue = UUID.randomUUID().toString();
    BridgeInternal.setProperty(properties, "newProp", newPropValue);

    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    ModelBridgeInternal.setPartitionKey(options, new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")));
    CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(properties,
        properties.getId(),
        new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
        options);
    assertThat(ModelBridgeInternal.getObjectFromJsonSerializable(BridgeInternal.getProperties(replace), "newProp")).isEqualTo(newPropValue);
}
// Deletes a freshly created document by id + partition key and expects
// HTTP 204 No Content.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItem() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    CosmosItemResponse<?> deleteResponse = container.deleteItem(properties.getId(),
        new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
        options);
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
// Creates a document, then deletes it by passing the entity itself (rather
// than id + partition key) and expects HTTP 204 No Content.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItemUsingEntity() throws Exception {
    InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(document);

    CosmosItemResponse<?> deleteResponse =
        container.deleteItem(createResponse.getItem(), new CosmosItemRequestOptions());
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
// Smoke-checks readAllItems: after inserting one document the full-container
// feed must yield at least one result.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItems() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
        container.readAllItems(cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
// Queries a created document by id via both the raw query-string overload and
// the SqlQuerySpec overload; each must return at least one result.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItems() throws Exception{
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();

    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
        container.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
// Verifies that a caller-supplied correlation activity id is propagated to
// every feed response and shows up in the request diagnostics.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithCustomCorrelationActivityId() throws Exception{
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    UUID correlationId = UUID.randomUUID();
    ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .setCorrelationActivityId(cosmosQueryRequestOptions, correlationId);

    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();

    feedResponseIterator1
        .iterableByPage()
        .forEach(response -> {
            // BUG FIX: the original built AssertJ assertions but never executed
            // them (no terminal isTrue()/isEqualTo() call), and compared UUID
            // objects with '=='. Both checks were silently no-ops.
            assertThat(response.getCorrelationActivityId())
                .withFailMessage("response.getCorrelationActivityId")
                .isEqualTo(correlationId);
            assertThat(response.getCosmosDiagnostics().toString())
                .withFailMessage("response.getCosmosDiagnostics")
                .contains(correlationId.toString());
        });
}
// Queries with an oversized, invalid session token under EVENTUAL consistency;
// the query must still return exactly the one matching document.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithEventualConsistency() throws Exception{
    CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
    container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());

    String idAndPkValue = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
    CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);

    String query = String.format("SELECT * from c where c.id = '%s'", idAndPkValue);
    CosmosQueryRequestOptions cosmosQueryRequestOptions =
        new CosmosQueryRequestOptions()
            .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
            .setConsistencyLevel(ConsistencyLevel.EVENTUAL);

    CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, ObjectNode.class);
    feedResponseIterator1.handle(
        (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
    // BUG FIX: 'assertThat(x.stream().count() == 1);' asserted nothing —
    // AssertJ requires a terminal call. Assert the count explicitly.
    assertThat(feedResponseIterator1.stream().count()).isEqualTo(1);

    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
        container.queryItems(querySpec, cosmosQueryRequestOptions, ObjectNode.class);
    feedResponseIterator3.handle(
        (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
    assertThat(feedResponseIterator3.stream().count()).isEqualTo(1);
}
// Pages through a 3-document query one document at a time using continuation
// tokens and verifies every document is seen exactly once.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() throws Exception{
    List<String> actualIds = new ArrayList<>();
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(properties);
    actualIds.add(properties.getId());
    properties = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(properties);
    actualIds.add(properties.getId());
    properties = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(properties);
    actualIds.add(properties.getId());

    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;

    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);

    do {
        // Each pass fetches a single page starting at the previous token.
        Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
            feedResponseIterator1.iterableByPage(continuationToken, pageSize);
        for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while(continuationToken != null);

    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// Reads all items of a single logical partition; obtains the feed twice to
// confirm the paged iterable can be re-created for the same partition key.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartition() throws Exception{
    String pkValue = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);

    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
        container.readAllItems(
            new PartitionKey(pkValue),
            cosmosQueryRequestOptions,
            ObjectNode.class);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();

    CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
        container.readAllItems(
            new PartitionKey(pkValue),
            cosmosQueryRequestOptions,
            ObjectNode.class);
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
// Pages through a logical partition containing 3 documents one document at a
// time using continuation tokens, verifying every document is seen once.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartitionWithContinuationTokenAndPageSize() throws Exception{
    String pkValue = UUID.randomUUID().toString();
    List<String> actualIds = new ArrayList<>();
    ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);
    properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);
    properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);

    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;

    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.readAllItems(
            new PartitionKey(pkValue),
            cosmosQueryRequestOptions,
            InternalObjectNode.class);

    do {
        // Each pass fetches a single page starting at the previous token.
        Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
            feedResponseIterator1.iterableByPage(continuationToken, pageSize);
        for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while(continuationToken != null);

    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// Builds a minimal test document with the given id, a random partition key
// value ("mypk") and a fixed "sgmts" payload.
private InternalObjectNode getDocumentDefinition(String documentId) {
    String partitionKeyValue = UUID.randomUUID().toString();
    String json = String.format("{ "
        + "\"id\": \"%s\", "
        + "\"mypk\": \"%s\", "
        + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
        + "}"
        , documentId, partitionKeyValue);
    return new InternalObjectNode(json);
}
// Same shape as the single-argument overload, but with a caller-supplied
// partition key value; parsed into a Jackson ObjectNode.
private ObjectNode getDocumentDefinition(String documentId, String pkId) throws JsonProcessingException {
    final String json = String.format("{ "
        + "\"id\": \"%s\", "
        + "\"mypk\": \"%s\", "
        + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
        + "}"
        , documentId, pkId);
    return OBJECT_MAPPER.readValue(json, ObjectNode.class);
}
// Asserts the created resource id is present and equals the id that was sent.
private void validateItemResponse(InternalObjectNode containerProperties,
    CosmosItemResponse<InternalObjectNode> createResponse) {
    assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull();
    assertThat(BridgeInternal.getProperties(createResponse).getId())
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}
// Asserts the response id is present and equals the expected id.
private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) {
    assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull();
    assertThat(BridgeInternal.getProperties(createResponse).getId())
        .as("check Resource Id")
        .isEqualTo(expectedId);
}
}
class CosmosItemTest extends TestSuiteBase {
// Shared JSON mapper used to build test documents.
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
// Sync client and container under test; initialized in before_CosmosItemTest().
private CosmosClient client;
private CosmosContainer container;
// TestNG factory constructor: one test-class instance per configured client
// builder (direct-mode / session-consistency variants).
@Factory(dataProvider = "clientBuildersWithDirectSession")
public CosmosItemTest(CosmosClientBuilder clientBuilder) {
    super(clientBuilder);
}
// Builds the sync client and binds 'container' to the shared multi-partition
// container before any test in this class runs.
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
    assertThat(this.client).isNull();
    this.client = getClientBuilder().buildClient();
    CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
    container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
}
// Releases the client created in before_CosmosItemTest().
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    assertThat(this.client).isNotNull();
    this.client.close();
}
// Creates two documents — one via the simple overload, one with explicit
// request options — verifying a positive request charge and matching ids.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItem() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    validateItemResponse(properties, itemResponse);

    properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
    validateItemResponse(properties, itemResponse1);
}
// Verifies that creating the same document twice fails with 409 CONFLICT.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void createItem_alreadyExists() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
    validateItemResponse(properties, itemResponse);

    properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
    validateItemResponse(properties, itemResponse1);

    // BUG FIX: the original try/catch passed silently when no exception was
    // thrown at all; track that the conflict was actually observed.
    boolean conflictObserved = false;
    try {
        container.createItem(properties, new CosmosItemRequestOptions());
    } catch (Exception e) {
        assertThat(e).isInstanceOf(CosmosException.class);
        assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT);
        conflictObserved = true;
    }
    assertThat(conflictObserved)
        .as("creating a duplicate item must raise CONFLICT")
        .isTrue();
}
// Creates a document carrying a ~1.5 MB string property to verify that large
// payloads round-trip through createItem.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createLargeItem() throws Exception {
    InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());

    // Pad the document well past typical payload sizes; presumably stays under
    // the service's item-size limit — TODO confirm current limit.
    int size = (int) (ONE_MB * 1.5);
    BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size));

    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
    validateItemResponse(docDefinition, itemResponse);
}
// Creates a document whose partition key value is ~290 characters long
// ("0x1x2x...99x") and verifies the create succeeds.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItemWithVeryLargePartitionKey() throws Exception {
    InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
    StringBuilder sb = new StringBuilder();
    for(int i = 0; i < 100; i++) {
        sb.append(i).append("x");
    }
    // Overwrite the random partition key with the long value.
    BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());

    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
    validateItemResponse(docDefinition, itemResponse);
}
// Creates a document with a ~290-character partition key value, then
// point-reads it back with the same long key.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithVeryLargePartitionKey() throws Exception {
    InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
    StringBuilder sb = new StringBuilder();
    for(int i = 0; i < 100; i++) {
        sb.append(i).append("x");
    }
    BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition);

    // Let secondary replicas catch up before reading, to avoid flakiness on
    // weaker consistency configurations.
    waitIfNeededForReplicasToCatchUp(getClientBuilder());

    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(docDefinition.getId(),
        new PartitionKey(sb.toString()), options,
        InternalObjectNode.class);
    validateItemResponse(docDefinition, readResponse);
}
// Point-reads a freshly created document by id + partition key and checks that
// the returned id matches.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItem() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    CosmosItemResponse<InternalObjectNode> readResponse1 = container.readItem(properties.getId(),
        new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
        new CosmosItemRequestOptions(),
        InternalObjectNode.class);
    validateItemResponse(properties, readResponse1);
}
/**
 * Utility (not a test): subscribes to {@code source} and races it against a
 * soft timeout. If the timeout elapses first, {@code fallback} is emitted, but
 * the original subscription keeps running and its eventual outcome is logged.
 *
 * BUG FIX: removed the misplaced {@code @Test} annotation — this is a static,
 * parameterized helper returning {@code Mono}, not a TestNG test method; the
 * annotation would make TestNG try (and fail) to run it as a test.
 *
 * @param source      the item response to await
 * @param softTimeout how long to wait before emitting the fallback
 * @param fallback    value emitted when the soft timeout elapses
 * @return the item from {@code source}, or {@code fallback} after the timeout
 */
static <T> Mono<T> wrapWithSoftTimeoutAndFallback(
    Mono<CosmosItemResponse<T>> source,
    Duration softTimeout,
    T fallback) {

    // Flips to true once the soft timeout fired; late results are then only logged.
    AtomicBoolean timeoutElapsed = new AtomicBoolean(false);
    return Mono
        .<T>create(sink -> {
            source
                .subscribeOn(Schedulers.boundedElastic())
                .subscribe(
                    response -> {
                        if (timeoutElapsed.get()) {
                            logger.warn(
                                "COMPLETED SUCCESSFULLY after timeout elapsed. Diagnostics: {}",
                                response.getDiagnostics().toString());
                        } else {
                            logger.info("COMPLETED SUCCESSFULLY");
                        }
                        sink.success(response.getItem());
                    },
                    error -> {
                        final Throwable unwrappedException = Exceptions.unwrap(error);
                        if (unwrappedException instanceof CosmosException) {
                            final CosmosException cosmosException = (CosmosException) unwrappedException;
                            logger.error(
                                "COMPLETED WITH COSMOS FAILURE. Diagnostics: {}",
                                cosmosException.getDiagnostics() != null ?
                                    cosmosException.getDiagnostics().toString() : "n/a",
                                cosmosException);
                        } else {
                            logger.error("COMPLETED WITH GENERIC FAILURE", error);
                        }
                        if (timeoutElapsed.get()) {
                            // Fallback was already emitted; swallow the late failure.
                            sink.success();
                        } else {
                            sink.error(error);
                        }
                    }
                );
        })
        .timeout(softTimeout)
        // NOTE(review): this resumes on ANY error, not just the soft timeout —
        // preserved from the original; confirm that is intended.
        .onErrorResume(error -> {
            timeoutElapsed.set(true);
            return Mono.just(fallback);
        });
}
// Point-read with an oversized, deliberately invalid session token and
// EVENTUAL consistency; presumably verifies the bad token is ignored for
// eventual reads — the item must still come back. Rebinds 'container' to the
// shared multi-partition container.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithEventualConsistency() throws Exception {
    CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
    container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());

    String idAndPkValue = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
    CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);

    CosmosItemResponse<ObjectNode> readResponse1 = container.readItem(
        idAndPkValue,
        new PartitionKey(idAndPkValue),
        new CosmosItemRequestOptions()
            .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
            .setConsistencyLevel(ConsistencyLevel.EVENTUAL),
        ObjectNode.class);

    logger.info("REQUEST DIAGNOSTICS: {}", readResponse1.getDiagnostics().toString());
    validateIdOfItemResponse(idAndPkValue, readResponse1);
}
// Creates a document, adds a new property, replaces the document, and verifies
// the replaced document carries the new property value.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replaceItem() throws Exception{
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
    validateItemResponse(properties, itemResponse);

    String newPropValue = UUID.randomUUID().toString();
    BridgeInternal.setProperty(properties, "newProp", newPropValue);

    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    ModelBridgeInternal.setPartitionKey(options, new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")));
    CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(properties,
        properties.getId(),
        new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
        options);
    assertThat(ModelBridgeInternal.getObjectFromJsonSerializable(BridgeInternal.getProperties(replace), "newProp")).isEqualTo(newPropValue);
}
// Deletes a freshly created document by id + partition key and expects
// HTTP 204 No Content.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItem() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    CosmosItemResponse<?> deleteResponse = container.deleteItem(properties.getId(),
        new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
        options);
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
// Creates a document, then deletes it by passing the entity itself (rather
// than id + partition key) and expects HTTP 204 No Content.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItemUsingEntity() throws Exception {
    InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(document);

    CosmosItemResponse<?> deleteResponse =
        container.deleteItem(createResponse.getItem(), new CosmosItemRequestOptions());
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
// Smoke-checks readAllItems: after inserting one document the full-container
// feed must yield at least one result.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItems() throws Exception {
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
        container.readAllItems(cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
// Queries a created document by id via both the raw query-string overload and
// the SqlQuerySpec overload; each must return at least one result.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItems() throws Exception{
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();

    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
        container.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
// Verifies that a caller-supplied correlation activity id is propagated to
// every feed response and shows up in the request diagnostics.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithCustomCorrelationActivityId() throws Exception{
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);

    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    UUID correlationId = UUID.randomUUID();
    ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .setCorrelationActivityId(cosmosQueryRequestOptions, correlationId);

    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();

    feedResponseIterator1
        .iterableByPage()
        .forEach(response -> {
            // BUG FIX: the original built AssertJ assertions but never executed
            // them (no terminal isTrue()/isEqualTo() call), and compared UUID
            // objects with '=='. Both checks were silently no-ops.
            assertThat(response.getCorrelationActivityId())
                .withFailMessage("response.getCorrelationActivityId")
                .isEqualTo(correlationId);
            assertThat(response.getCosmosDiagnostics().toString())
                .withFailMessage("response.getCosmosDiagnostics")
                .contains(correlationId.toString());
        });
}
// Queries with an oversized, invalid session token under EVENTUAL consistency;
// the query must still return exactly the one matching document.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithEventualConsistency() throws Exception{
    CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
    container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());

    String idAndPkValue = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
    CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);

    String query = String.format("SELECT * from c where c.id = '%s'", idAndPkValue);
    CosmosQueryRequestOptions cosmosQueryRequestOptions =
        new CosmosQueryRequestOptions()
            .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
            .setConsistencyLevel(ConsistencyLevel.EVENTUAL);

    CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, ObjectNode.class);
    feedResponseIterator1.handle(
        (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
    // BUG FIX: 'assertThat(x.stream().count() == 1);' asserted nothing —
    // AssertJ requires a terminal call. Assert the count explicitly.
    assertThat(feedResponseIterator1.stream().count()).isEqualTo(1);

    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
        container.queryItems(querySpec, cosmosQueryRequestOptions, ObjectNode.class);
    feedResponseIterator3.handle(
        (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
    assertThat(feedResponseIterator3.stream().count()).isEqualTo(1);
}
// Pages through a 3-document query one document at a time using continuation
// tokens and verifies every document is seen exactly once.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() throws Exception{
    List<String> actualIds = new ArrayList<>();
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(properties);
    actualIds.add(properties.getId());
    properties = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(properties);
    actualIds.add(properties.getId());
    properties = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(properties);
    actualIds.add(properties.getId());

    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;

    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);

    do {
        // Each pass fetches a single page starting at the previous token.
        Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
            feedResponseIterator1.iterableByPage(continuationToken, pageSize);
        for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while(continuationToken != null);

    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// Reads all items of a single logical partition; obtains the feed twice to
// confirm the paged iterable can be re-created for the same partition key.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartition() throws Exception{
    String pkValue = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);

    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
        container.readAllItems(
            new PartitionKey(pkValue),
            cosmosQueryRequestOptions,
            ObjectNode.class);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();

    CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
        container.readAllItems(
            new PartitionKey(pkValue),
            cosmosQueryRequestOptions,
            ObjectNode.class);
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
// Pages through a logical partition containing 3 documents one document at a
// time using continuation tokens, verifying every document is seen once.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartitionWithContinuationTokenAndPageSize() throws Exception{
    String pkValue = UUID.randomUUID().toString();
    List<String> actualIds = new ArrayList<>();
    ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);
    properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);
    properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);

    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;

    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.readAllItems(
            new PartitionKey(pkValue),
            cosmosQueryRequestOptions,
            InternalObjectNode.class);

    do {
        // Each pass fetches a single page starting at the previous token.
        Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
            feedResponseIterator1.iterableByPage(continuationToken, pageSize);
        for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while(continuationToken != null);

    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
/**
 * Builds a test document JSON with the given id, a random "mypk" partition-key
 * value and a fixed "sgmts" payload.
 */
private InternalObjectNode getDocumentDefinition(String documentId) {
    String partitionKeyValue = UUID.randomUUID().toString();
    String json = "{ \"id\": \"" + documentId + "\", "
        + "\"mypk\": \"" + partitionKeyValue + "\", "
        + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}";
    return new InternalObjectNode(json);
}
/**
 * Builds a test document as a Jackson {@code ObjectNode} with explicit id and
 * partition-key values and a fixed "sgmts" payload.
 */
private ObjectNode getDocumentDefinition(String documentId, String pkId) throws JsonProcessingException {
    String json = "{ \"id\": \"" + documentId + "\", "
        + "\"mypk\": \"" + pkId + "\", "
        + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}";
    return OBJECT_MAPPER.readValue(json, ObjectNode.class);
}
/** Asserts the response carries a non-null id equal to the request document's id. */
private void validateItemResponse(InternalObjectNode containerProperties,
                                  CosmosItemResponse<InternalObjectNode> createResponse) {
    InternalObjectNode responseProperties = BridgeInternal.getProperties(createResponse);
    assertThat(responseProperties.getId()).isNotNull();
    assertThat(responseProperties.getId())
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}
/** Asserts the response carries a non-null id equal to {@code expectedId}. */
private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) {
    ObjectNode responseProperties = BridgeInternal.getProperties(createResponse);
    assertThat(responseProperties.getId()).isNotNull();
    assertThat(responseProperties.getId())
        .as("check Resource Id")
        .isEqualTo(expectedId);
}
} |
Make sense, so only for async API currently | public void readItemWithSoftTimeoutAndFallback() throws Exception {
String pk = UUID.randomUUID().toString();
String id = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(id, pk);
ObjectNode fallBackProperties = getDocumentDefinition("justFallback", "justFallback");
container.createItem(properties);
String successfulResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofDays(3),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(successfulResponse).isEqualTo(id);
String timedOutResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofNanos(10),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(timedOutResponse).isEqualTo("justFallback");
Thread.sleep(1000);
} | .asyncContainer | public void readItemWithSoftTimeoutAndFallback() throws Exception {
String pk = UUID.randomUUID().toString();
String id = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(id, pk);
ObjectNode fallBackProperties = getDocumentDefinition("justFallback", "justFallback");
container.createItem(properties);
String successfulResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofDays(3),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(successfulResponse).isEqualTo(id);
String timedOutResponse = wrapWithSoftTimeoutAndFallback(
container
.asyncContainer
.readItem(id,
new PartitionKey(pk),
new CosmosItemRequestOptions(),
ObjectNode.class),
Duration.ofNanos(10),
fallBackProperties)
.map(node -> node.get("id").asText())
.block();
assertThat(timedOutResponse).isEqualTo("justFallback");
Thread.sleep(1000);
} | class CosmosItemTest extends TestSuiteBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private CosmosClient client;
private CosmosContainer container;
@Factory(dataProvider = "clientBuildersWithDirectSession")
public CosmosItemTest(CosmosClientBuilder clientBuilder) {
// TestNG factory: one instance of this test class per configured client builder.
super(clientBuilder);
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Builds the sync client once per class and binds 'container' to the shared
// multi-partition container provided by the suite.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildClient();
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
// Closes the client built in before_CosmosItemTest; alwaysRun so cleanup happens even on failure.
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItem() throws Exception {
// Creates an item (with and without explicit request options), verifies the echoed id
// and that a positive request charge is reported.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(properties, itemResponse);
properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
validateItemResponse(properties, itemResponse1);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void createItem_alreadyExists() throws Exception {
    // Creating an item whose id already exists must fail with HTTP 409 CONFLICT.
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
    validateItemResponse(properties, itemResponse);
    properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
    validateItemResponse(properties, itemResponse1);
    try {
        container.createItem(properties, new CosmosItemRequestOptions());
        // BUG FIX: the original silently passed when the duplicate create unexpectedly
        // succeeded (no exception -> catch never ran, no assertion executed). Fail loudly.
        // AssertionError is not an Exception, so the catch below cannot swallow it.
        throw new AssertionError("Expected CONFLICT CosmosException when re-creating an existing item id");
    } catch (Exception e) {
        assertThat(e).isInstanceOf(CosmosException.class);
        assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT);
    }
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createLargeItem() throws Exception {
// Pads the document with a ~1.5 MB string property to exercise large-payload writes.
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
int size = (int) (ONE_MB * 1.5);
BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size));
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItemWithVeryLargePartitionKey() throws Exception {
// Overwrites "mypk" with a ~290-character value ("0x1x2x...99x") to exercise
// large partition-key handling on create.
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 100; i++) {
sb.append(i).append("x");
}
BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithVeryLargePartitionKey() throws Exception {
// Creates an item whose "mypk" is a ~290-character value, waits for replicas to
// catch up, then point-reads it back with the same large partition key.
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 100; i++) {
sb.append(i).append("x");
}
BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition);
waitIfNeededForReplicasToCatchUp(getClientBuilder());
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(docDefinition.getId(),
new PartitionKey(sb.toString()), options,
InternalObjectNode.class);
validateItemResponse(docDefinition, readResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItem() throws Exception {
// Round-trip: create an item, then point-read it by id + partition key and compare ids.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemResponse<InternalObjectNode> readResponse1 = container.readItem(properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class);
validateItemResponse(properties, readResponse1);
}
// FIX: removed the stray @Test annotation that preceded this declaration — this is a
// generic static helper with parameters, not a runnable TestNG test method; annotating
// it as @Test mis-registers it with the test runner.
/**
 * Subscribes to {@code source} on a bounded-elastic scheduler and imposes a soft timeout:
 * if {@code softTimeout} elapses first, {@code fallback} is emitted while the original
 * subscription keeps running — its late result or error is only logged, never surfaced.
 */
static <T> Mono<T> wrapWithSoftTimeoutAndFallback(
    Mono<CosmosItemResponse<T>> source,
    Duration softTimeout,
    T fallback) {
    // Set once the timeout path has delivered the fallback, so the real outcome is swallowed.
    AtomicBoolean timeoutElapsed = new AtomicBoolean(false);
    return Mono
        .<T>create(sink -> {
            source
                .subscribeOn(Schedulers.boundedElastic())
                .subscribe(
                    response -> {
                        if (timeoutElapsed.get()) {
                            logger.warn(
                                "COMPLETED SUCCESSFULLY after timeout elapsed. Diagnostics: {}",
                                response.getDiagnostics().toString());
                        } else {
                            logger.info("COMPLETED SUCCESSFULLY");
                        }
                        sink.success(response.getItem());
                    },
                    error -> {
                        final Throwable unwrappedException = Exceptions.unwrap(error);
                        if (unwrappedException instanceof CosmosException) {
                            final CosmosException cosmosException = (CosmosException) unwrappedException;
                            logger.error(
                                "COMPLETED WITH COSMOS FAILURE. Diagnostics: {}",
                                cosmosException.getDiagnostics() != null ?
                                    cosmosException.getDiagnostics().toString() : "n/a",
                                cosmosException);
                        } else {
                            logger.error("COMPLETED WITH GENERIC FAILURE", error);
                        }
                        if (timeoutElapsed.get()) {
                            // Fallback already delivered; complete empty instead of erroring late.
                            sink.success();
                        } else {
                            sink.error(error);
                        }
                    }
                );
        })
        .timeout(softTimeout)
        // NOTE(review): resumes on ANY error, not just TimeoutException — behavior kept as-is.
        .onErrorResume(error -> {
            timeoutElapsed.set(true);
            return Mono.just(fallback);
        });
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithEventualConsistency() throws Exception {
// Point-read with a deliberately bogus, oversized session token while the request is
// downgraded to EVENTUAL consistency; the read must still succeed and return the item.
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
String idAndPkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
CosmosItemResponse<ObjectNode> readResponse1 = container.readItem(
idAndPkValue,
new PartitionKey(idAndPkValue),
new CosmosItemRequestOptions()
.setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
.setConsistencyLevel(ConsistencyLevel.EVENTUAL),
ObjectNode.class);
logger.info("REQUEST DIAGNOSTICS: {}", readResponse1.getDiagnostics().toString());
validateIdOfItemResponse(idAndPkValue, readResponse1);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replaceItem() throws Exception{
// Creates an item, adds a new property, replaces the item and verifies the new
// property round-trips through the replace response.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
validateItemResponse(properties, itemResponse);
String newPropValue = UUID.randomUUID().toString();
BridgeInternal.setProperty(properties, "newProp", newPropValue);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(options, new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")));
CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(properties,
properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
options);
assertThat(ModelBridgeInternal.getObjectFromJsonSerializable(BridgeInternal.getProperties(replace), "newProp")).isEqualTo(newPropValue);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItem() throws Exception {
// Deletes a freshly created item by id + partition key; 204 No Content signals success.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
options);
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItemUsingEntity() throws Exception {
// Deletes by passing the created entity itself (entity overload) instead of id + PK.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(itemResponse.getItem(), options);
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItems() throws Exception {
// Creates one item and verifies a cross-partition readAllItems yields at least one result.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
container.readAllItems(cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItems() throws Exception{
// Queries a created item by id via both the raw-string and SqlQuerySpec overloads.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
container.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithCustomCorrelationActivityId() throws Exception{
    // Queries with a caller-supplied correlation activity id and verifies it is
    // propagated to every response page and into the page diagnostics.
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(properties);
    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    UUID correlationId = UUID.randomUUID();
    ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .setCorrelationActivityId(cosmosQueryRequestOptions, correlationId);
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
    feedResponseIterator1
        .iterableByPage()
        .forEach(response -> {
            // BUG FIX: the original used assertThat(boolean).withFailMessage(...) with no
            // terminal isTrue()/isEqualTo(), so nothing was ever asserted. Also compare the
            // UUIDs with equals-based isEqualTo rather than reference '=='.
            assertThat(response.getCorrelationActivityId())
                .as("response.getCorrelationActivityId")
                .isEqualTo(correlationId);
            assertThat(response.getCosmosDiagnostics().toString().contains(correlationId.toString()))
                .as("response.getCosmosDiagnostics")
                .isTrue();
        });
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithEventualConsistency() throws Exception{
    // Queries with a deliberately bogus, oversized session token while downgraded to
    // EVENTUAL consistency; both query overloads must still return exactly one match.
    CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
    container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
    String idAndPkValue = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
    container.createItem(properties);
    String query = String.format("SELECT * from c where c.id = '%s'", idAndPkValue);
    CosmosQueryRequestOptions cosmosQueryRequestOptions =
        new CosmosQueryRequestOptions()
            .setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
            .setConsistencyLevel(ConsistencyLevel.EVENTUAL);
    CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
        container.queryItems(query, cosmosQueryRequestOptions, ObjectNode.class);
    feedResponseIterator1.handle(
        (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
    // BUG FIX: was 'assertThat(feedResponseIterator1.stream().count() == 1);' — an AssertJ
    // call with no terminal verification, i.e. it asserted nothing. Same fix below.
    assertThat(feedResponseIterator1.stream().count()).isEqualTo(1);
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
        container.queryItems(querySpec, cosmosQueryRequestOptions, ObjectNode.class);
    feedResponseIterator3.handle(
        (r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
    assertThat(feedResponseIterator3.stream().count()).isEqualTo(1);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() throws Exception{
// Creates three items, queries them by id with pageSize = 1 and drains all pages via
// continuation tokens, verifying each page is full and all items are returned.
List<String> actualIds = new ArrayList<>();
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
String continuationToken = null;
int pageSize = 1;
int initialDocumentCount = 3;
int finalDocumentCount = 0;
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
do {
Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
feedResponseIterator1.iterableByPage(continuationToken, pageSize);
for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while(continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartition() throws Exception{
// Creates one item in a fresh logical partition and verifies readAllItems(partitionKey)
// returns results; the read is issued twice to confirm it is repeatable.
String pkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
ObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
ObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartitionWithContinuationTokenAndPageSize() throws Exception{
    // Creates three items in one logical partition, then drains them page by page
    // (pageSize = 1) via continuation tokens and verifies every item is returned once.
    // FIX: removed the unused local 'List<String> actualIds' from the original.
    String pkValue = UUID.randomUUID().toString();
    ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);
    properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);
    properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
    container.createItem(properties);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
        container.readAllItems(
            new PartitionKey(pkValue),
            cosmosQueryRequestOptions,
            InternalObjectNode.class);
    do {
        Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
            feedResponseIterator1.iterableByPage(continuationToken, pageSize);
        for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
            // With 3 items present, every served page must contain exactly pageSize results.
            assertThat(fr.getResults().size()).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while(continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
/**
 * Builds a test document JSON with the given id, a random "mypk" partition-key
 * value and a fixed "sgmts" payload.
 */
private InternalObjectNode getDocumentDefinition(String documentId) {
    String partitionKeyValue = UUID.randomUUID().toString();
    String json = "{ \"id\": \"" + documentId + "\", "
        + "\"mypk\": \"" + partitionKeyValue + "\", "
        + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}";
    return new InternalObjectNode(json);
}
/**
 * Builds a test document as a Jackson {@code ObjectNode} with explicit id and
 * partition-key values and a fixed "sgmts" payload.
 */
private ObjectNode getDocumentDefinition(String documentId, String pkId) throws JsonProcessingException {
    String json = "{ \"id\": \"" + documentId + "\", "
        + "\"mypk\": \"" + pkId + "\", "
        + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}";
    return OBJECT_MAPPER.readValue(json, ObjectNode.class);
}
/** Asserts the response carries a non-null id equal to the request document's id. */
private void validateItemResponse(InternalObjectNode containerProperties,
                                  CosmosItemResponse<InternalObjectNode> createResponse) {
    InternalObjectNode responseProperties = BridgeInternal.getProperties(createResponse);
    assertThat(responseProperties.getId()).isNotNull();
    assertThat(responseProperties.getId())
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}
/** Asserts the response carries a non-null id equal to {@code expectedId}. */
private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) {
    ObjectNode responseProperties = BridgeInternal.getProperties(createResponse);
    assertThat(responseProperties.getId()).isNotNull();
    assertThat(responseProperties.getId())
        .as("check Resource Id")
        .isEqualTo(expectedId);
}
} | class CosmosItemTest extends TestSuiteBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private CosmosClient client;
private CosmosContainer container;
@Factory(dataProvider = "clientBuildersWithDirectSession")
public CosmosItemTest(CosmosClientBuilder clientBuilder) {
// TestNG factory: one instance of this test class per configured client builder.
super(clientBuilder);
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Builds the sync client once per class and binds 'container' to the shared
// multi-partition container provided by the suite.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildClient();
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
// Closes the client built in before_CosmosItemTest; alwaysRun so cleanup happens even on failure.
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItem() throws Exception {
// Creates an item (with and without explicit request options), verifies the echoed id
// and that a positive request charge is reported.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(properties, itemResponse);
properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
validateItemResponse(properties, itemResponse1);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void createItem_alreadyExists() throws Exception {
    // Creating an item whose id already exists must fail with HTTP 409 CONFLICT.
    InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
    validateItemResponse(properties, itemResponse);
    properties = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse1 = container.createItem(properties, new CosmosItemRequestOptions());
    validateItemResponse(properties, itemResponse1);
    try {
        container.createItem(properties, new CosmosItemRequestOptions());
        // BUG FIX: the original silently passed when the duplicate create unexpectedly
        // succeeded (no exception -> catch never ran, no assertion executed). Fail loudly.
        // AssertionError is not an Exception, so the catch below cannot swallow it.
        throw new AssertionError("Expected CONFLICT CosmosException when re-creating an existing item id");
    } catch (Exception e) {
        assertThat(e).isInstanceOf(CosmosException.class);
        assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT);
    }
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createLargeItem() throws Exception {
// Pads the document with a ~1.5 MB string property to exercise large-payload writes.
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
int size = (int) (ONE_MB * 1.5);
BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size));
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createItemWithVeryLargePartitionKey() throws Exception {
// Overwrites "mypk" with a ~290-character value ("0x1x2x...99x") to exercise
// large partition-key handling on create.
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 100; i++) {
sb.append(i).append("x");
}
BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition, new CosmosItemRequestOptions());
validateItemResponse(docDefinition, itemResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithVeryLargePartitionKey() throws Exception {
// Creates an item whose "mypk" is a ~290-character value, waits for replicas to
// catch up, then point-reads it back with the same large partition key.
InternalObjectNode docDefinition = getDocumentDefinition(UUID.randomUUID().toString());
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 100; i++) {
sb.append(i).append("x");
}
BridgeInternal.setProperty(docDefinition, "mypk", sb.toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(docDefinition);
waitIfNeededForReplicasToCatchUp(getClientBuilder());
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(docDefinition.getId(),
new PartitionKey(sb.toString()), options,
InternalObjectNode.class);
validateItemResponse(docDefinition, readResponse);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItem() throws Exception {
// Round-trip: create an item, then point-read it by id + partition key and compare ids.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemResponse<InternalObjectNode> readResponse1 = container.readItem(properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class);
validateItemResponse(properties, readResponse1);
}
// FIX: removed the stray @Test annotation that preceded this declaration — this is a
// generic static helper with parameters, not a runnable TestNG test method; annotating
// it as @Test mis-registers it with the test runner.
/**
 * Subscribes to {@code source} on a bounded-elastic scheduler and imposes a soft timeout:
 * if {@code softTimeout} elapses first, {@code fallback} is emitted while the original
 * subscription keeps running — its late result or error is only logged, never surfaced.
 */
static <T> Mono<T> wrapWithSoftTimeoutAndFallback(
    Mono<CosmosItemResponse<T>> source,
    Duration softTimeout,
    T fallback) {
    // Set once the timeout path has delivered the fallback, so the real outcome is swallowed.
    AtomicBoolean timeoutElapsed = new AtomicBoolean(false);
    return Mono
        .<T>create(sink -> {
            source
                .subscribeOn(Schedulers.boundedElastic())
                .subscribe(
                    response -> {
                        if (timeoutElapsed.get()) {
                            logger.warn(
                                "COMPLETED SUCCESSFULLY after timeout elapsed. Diagnostics: {}",
                                response.getDiagnostics().toString());
                        } else {
                            logger.info("COMPLETED SUCCESSFULLY");
                        }
                        sink.success(response.getItem());
                    },
                    error -> {
                        final Throwable unwrappedException = Exceptions.unwrap(error);
                        if (unwrappedException instanceof CosmosException) {
                            final CosmosException cosmosException = (CosmosException) unwrappedException;
                            logger.error(
                                "COMPLETED WITH COSMOS FAILURE. Diagnostics: {}",
                                cosmosException.getDiagnostics() != null ?
                                    cosmosException.getDiagnostics().toString() : "n/a",
                                cosmosException);
                        } else {
                            logger.error("COMPLETED WITH GENERIC FAILURE", error);
                        }
                        if (timeoutElapsed.get()) {
                            // Fallback already delivered; complete empty instead of erroring late.
                            sink.success();
                        } else {
                            sink.error(error);
                        }
                    }
                );
        })
        .timeout(softTimeout)
        // NOTE(review): resumes on ANY error, not just TimeoutException — behavior kept as-is.
        .onErrorResume(error -> {
            timeoutElapsed.set(true);
            return Mono.just(fallback);
        });
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readItemWithEventualConsistency() throws Exception {
// Point-read with a deliberately bogus, oversized session token while the request is
// downgraded to EVENTUAL consistency; the read must still succeed and return the item.
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
String idAndPkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
CosmosItemResponse<ObjectNode> readResponse1 = container.readItem(
idAndPkValue,
new PartitionKey(idAndPkValue),
new CosmosItemRequestOptions()
.setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
.setConsistencyLevel(ConsistencyLevel.EVENTUAL),
ObjectNode.class);
logger.info("REQUEST DIAGNOSTICS: {}", readResponse1.getDiagnostics().toString());
validateIdOfItemResponse(idAndPkValue, readResponse1);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replaceItem() throws Exception{
// Creates an item, adds a new property, replaces the item and verifies the new
// property round-trips through the replace response.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
validateItemResponse(properties, itemResponse);
String newPropValue = UUID.randomUUID().toString();
BridgeInternal.setProperty(properties, "newProp", newPropValue);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(options, new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")));
CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(properties,
properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
options);
assertThat(ModelBridgeInternal.getObjectFromJsonSerializable(BridgeInternal.getProperties(replace), "newProp")).isEqualTo(newPropValue);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItem() throws Exception {
// Deletes a freshly created item by id + partition key; 204 No Content signals success.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(properties.getId(),
new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(properties, "mypk")),
options);
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deleteItemUsingEntity() throws Exception {
// Deletes by passing the created entity itself (entity overload) instead of id + PK.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(itemResponse.getItem(), options);
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItems() throws Exception {
// Creates one item and verifies a cross-partition readAllItems yields at least one result.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
container.readAllItems(cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
@Test(groups = { "simple" }, timeOut = TIMOUT == TIMEOUT ? TIMEOUT : TIMEOUT)
public void queryItems() throws Exception{
// Queries a created item by id via both the raw-string and SqlQuerySpec overloads.
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedIterable<InternalObjectNode> feedResponseIterator3 =
container.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithCustomCorrelationActivityId() throws Exception{
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(properties);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
UUID correlationId = UUID.randomUUID();
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.setCorrelationActivityId(cosmosQueryRequestOptions, correlationId);
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
feedResponseIterator1
.iterableByPage()
.forEach(response -> {
assertThat(response.getCorrelationActivityId() == correlationId)
.withFailMessage("response.getCorrelationActivityId");
assertThat(response.getCosmosDiagnostics().toString().contains(correlationId.toString()))
.withFailMessage("response.getCosmosDiagnostics");
});
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithEventualConsistency() throws Exception{
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient());
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
String idAndPkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(idAndPkValue, idAndPkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
String query = String.format("SELECT * from c where c.id = '%s'", idAndPkValue);
CosmosQueryRequestOptions cosmosQueryRequestOptions =
new CosmosQueryRequestOptions()
.setSessionToken(StringUtils.repeat("SomeManualInvalidSessionToken", 2000))
.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, ObjectNode.class);
feedResponseIterator1.handle(
(r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
assertThat(feedResponseIterator1.stream().count() == 1);
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
container.queryItems(querySpec, cosmosQueryRequestOptions, ObjectNode.class);
feedResponseIterator3.handle(
(r) -> logger.info("Query RequestDiagnostics: {}", r.getCosmosDiagnostics().toString()));
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
assertThat(feedResponseIterator3.stream().count() == 1);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() throws Exception{
List<String> actualIds = new ArrayList<>();
InternalObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
properties = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(properties);
actualIds.add(properties.getId());
String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
String continuationToken = null;
int pageSize = 1;
int initialDocumentCount = 3;
int finalDocumentCount = 0;
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.queryItems(query, cosmosQueryRequestOptions, InternalObjectNode.class);
do {
Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
feedResponseIterator1.iterableByPage(continuationToken, pageSize);
for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while(continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartition() throws Exception{
String pkValue = UUID.randomUUID().toString();
ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
CosmosItemResponse<ObjectNode> itemResponse = container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedIterable<ObjectNode> feedResponseIterator1 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
ObjectNode.class);
assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
CosmosPagedIterable<ObjectNode> feedResponseIterator3 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
ObjectNode.class);
assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readAllItemsOfLogicalPartitionWithContinuationTokenAndPageSize() throws Exception{
String pkValue = UUID.randomUUID().toString();
List<String> actualIds = new ArrayList<>();
ObjectNode properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
container.createItem(properties);
properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
container.createItem(properties);
properties = getDocumentDefinition(UUID.randomUUID().toString(), pkValue);
container.createItem(properties);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
String continuationToken = null;
int pageSize = 1;
int initialDocumentCount = 3;
int finalDocumentCount = 0;
CosmosPagedIterable<InternalObjectNode> feedResponseIterator1 =
container.readAllItems(
new PartitionKey(pkValue),
cosmosQueryRequestOptions,
InternalObjectNode.class);
do {
Iterable<FeedResponse<InternalObjectNode>> feedResponseIterable =
feedResponseIterator1.iterableByPage(continuationToken, pageSize);
for (FeedResponse<InternalObjectNode> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while(continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
private InternalObjectNode getDocumentDefinition(String documentId) {
final String uuid = UUID.randomUUID().toString();
final InternalObjectNode properties =
new InternalObjectNode(String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, documentId, uuid));
return properties;
}
private ObjectNode getDocumentDefinition(String documentId, String pkId) throws JsonProcessingException {
String json = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, documentId, pkId);
return
OBJECT_MAPPER.readValue(json, ObjectNode.class);
}
private void validateItemResponse(InternalObjectNode containerProperties,
CosmosItemResponse<InternalObjectNode> createResponse) {
assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull();
assertThat(BridgeInternal.getProperties(createResponse).getId())
.as("check Resource Id")
.isEqualTo(containerProperties.getId());
}
private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) {
assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull();
assertThat(BridgeInternal.getProperties(createResponse).getId())
.as("check Resource Id")
.isEqualTo(expectedId);
}
} |
I think we probably need to cache the result here, turning into a hot publisher, else for each subscribe, it will trigger a call to gateway? https://github.com/Azure/azure-sdk-for-java/blob/d05f7e4ec323f32df36545871ef0685e4a645537/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/caches/AsyncLazy.java#L38 https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#cache-- | public Mono<TValue> getValueAsync() {
Mono<TValue> valueMono = this.value;
if (valueMono != null) {
return valueMono;
}
valueLock.lock();
try {
if (this.value != null) {
return this.value;
}
this.value = this.createValueFunc.apply(null);
return this.value;
} finally {
valueLock.unlock();
}
} | this.value = this.createValueFunc.apply(null); | public Mono<TValue> getValueAsync() {
return this.value.get();
} | class AsyncLazyWithRefresh<TValue> {
private final Function<TValue, Mono<TValue>> createValueFunc;
private final ReentrantLock valueLock = new ReentrantLock();
private final ReentrantLock removeFromCacheLock = new ReentrantLock();
private boolean removeFromCache = false;
private Mono<TValue> value;
private Mono<TValue> refreshInProgress;
public AsyncLazyWithRefresh(TValue value) {
this.createValueFunc = null;
this.value = Mono.just(value);
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Mono<TValue> value) {
this.createValueFunc = null;
this.value = value;
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.createValueFunc = taskFactory;
this.value = null;
this.refreshInProgress = null;
}
public Mono<TValue> value() {
return value;
}
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value;
AtomicReference<TValue> originalValue = new AtomicReference<>();
valueMono.flatMap(value -> {
originalValue.set(value);
return valueMono;
});
AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>();
valueLock.lock();
try {
this.refreshInProgress = createRefreshFunction.apply(originalValue.get());
refreshMono.set(this.refreshInProgress);
return refreshMono.get();
} finally {
valueLock.unlock();
}
}
public boolean shouldRemoveFromCache() {
if (this.removeFromCache) {
return false;
}
removeFromCacheLock.lock();
try {
if (this.removeFromCache) {
return false;
}
this.removeFromCache = true;
return true;
} finally {
removeFromCacheLock.unlock();
}
}
} | class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
} |
instead of using lock here, probably can use AtomicReference -> compareAndSet, | public Mono<TValue> getValueAsync() {
Mono<TValue> valueMono = this.value;
if (valueMono != null) {
return valueMono;
}
valueLock.lock();
try {
if (this.value != null) {
return this.value;
}
this.value = this.createValueFunc.apply(null);
return this.value;
} finally {
valueLock.unlock();
}
} | valueLock.lock(); | public Mono<TValue> getValueAsync() {
return this.value.get();
} | class AsyncLazyWithRefresh<TValue> {
private final Function<TValue, Mono<TValue>> createValueFunc;
private final ReentrantLock valueLock = new ReentrantLock();
private final ReentrantLock removeFromCacheLock = new ReentrantLock();
private boolean removeFromCache = false;
private Mono<TValue> value;
private Mono<TValue> refreshInProgress;
public AsyncLazyWithRefresh(TValue value) {
this.createValueFunc = null;
this.value = Mono.just(value);
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Mono<TValue> value) {
this.createValueFunc = null;
this.value = value;
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.createValueFunc = taskFactory;
this.value = null;
this.refreshInProgress = null;
}
public Mono<TValue> value() {
return value;
}
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value;
AtomicReference<TValue> originalValue = new AtomicReference<>();
valueMono.flatMap(value -> {
originalValue.set(value);
return valueMono;
});
AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>();
valueLock.lock();
try {
this.refreshInProgress = createRefreshFunction.apply(originalValue.get());
refreshMono.set(this.refreshInProgress);
return refreshMono.get();
} finally {
valueLock.unlock();
}
}
public boolean shouldRemoveFromCache() {
if (this.removeFromCache) {
return false;
}
removeFromCacheLock.lock();
try {
if (this.removeFromCache) {
return false;
}
this.removeFromCache = true;
return true;
} finally {
removeFromCacheLock.unlock();
}
}
} | class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
} |
Same here, maybe consider using AtomicBoolean? | public boolean shouldRemoveFromCache() {
if (this.removeFromCache) {
return false;
}
removeFromCacheLock.lock();
try {
if (this.removeFromCache) {
return false;
}
this.removeFromCache = true;
return true;
} finally {
removeFromCacheLock.unlock();
}
} | removeFromCacheLock.lock(); | public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
} | class AsyncLazyWithRefresh<TValue> {
private final Function<TValue, Mono<TValue>> createValueFunc;
private final ReentrantLock valueLock = new ReentrantLock();
private final ReentrantLock removeFromCacheLock = new ReentrantLock();
private boolean removeFromCache = false;
private Mono<TValue> value;
private Mono<TValue> refreshInProgress;
public AsyncLazyWithRefresh(TValue value) {
this.createValueFunc = null;
this.value = Mono.just(value);
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Mono<TValue> value) {
this.createValueFunc = null;
this.value = value;
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.createValueFunc = taskFactory;
this.value = null;
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
Mono<TValue> valueMono = this.value;
if (valueMono != null) {
return valueMono;
}
valueLock.lock();
try {
if (this.value != null) {
return this.value;
}
this.value = this.createValueFunc.apply(null);
return this.value;
} finally {
valueLock.unlock();
}
}
public Mono<TValue> value() {
return value;
}
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value;
AtomicReference<TValue> originalValue = new AtomicReference<>();
valueMono.flatMap(value -> {
originalValue.set(value);
return valueMono;
});
AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>();
valueLock.lock();
try {
this.refreshInProgress = createRefreshFunction.apply(originalValue.get());
refreshMono.set(this.refreshInProgress);
return refreshMono.get();
} finally {
valueLock.unlock();
}
}
} | class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
return this.value.get();
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
} |
Add validation about cache key? | public void createItem_withCacheRefresh() throws InterruptedException {
String containerId = "bulksplittestcontainer_" + UUID.randomUUID();
int totalRequest = getTotalRequest();
CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block();
CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
Flux<CosmosItemOperation> cosmosItemOperationFlux1 = Flux.range(0, totalRequest).map(i -> {
String partitionKey = UUID.randomUUID().toString();
TestDoc testDoc = this.populateTestDoc(partitionKey);
return CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey));
});
Flux<CosmosItemOperation> cosmosItemOperationFlux2 = Flux.range(0, totalRequest).map(i -> {
String partitionKey = UUID.randomUUID().toString();
EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey);
return CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey));
});
CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
Flux<CosmosBulkOperationResponse<AsyncCacheNonBlockingIntegrationTest>> responseFlux =
container.executeBulkOperations(cosmosItemOperationFlux1, cosmosBulkExecutionOptions);
AtomicInteger processedDoc = new AtomicInteger(0);
responseFlux
.flatMap(cosmosBulkOperationResponse -> {
processedDoc.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(totalRequest);
RxDocumentClientImpl rxDocumentClient =
(RxDocumentClientImpl) this.bulkClient.getDocClientWrapper();
ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient);
String cacheKeyBeforePartition = routingMap.keys().nextElement();
List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, this.bulkClient);
logger.info("Scaling up throughput for split");
ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
logger.info("Throughput replace request submitted for {} ",
throughputResponse.getProperties().getManualThroughput());
throughputResponse = container.readThroughput().block();
while (true) {
assert throughputResponse != null;
if (!throughputResponse.isReplacePending()) {
break;
}
logger.info("Waiting for split to complete");
Thread.sleep(10 * 1000);
throughputResponse = container.readThroughput().block();
}
List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId,
this.bulkClient);
assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size())
.as("Partition ranges should increase after split");
logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());
routingMap = getRoutingMap(rxDocumentClient);
String cacheKeyAfterPartition = routingMap.keys().nextElement();
assertThat(cacheKeyBeforePartition).isEqualTo(cacheKeyAfterPartition);
responseFlux = container.executeBulkOperations(cosmosItemOperationFlux2, cosmosBulkExecutionOptions);
AtomicInteger processedDoc2 = new AtomicInteger(0);
responseFlux
.flatMap(cosmosBulkOperationResponse -> {
processedDoc2.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(totalRequest);
container.delete().block();
} | assertThat(processedDoc.get()).isEqualTo(totalRequest); | public void createItem_withCacheRefresh() throws InterruptedException {
String containerId = "bulksplittestcontainer_" + UUID.randomUUID();
int totalRequest = getTotalRequest();
CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block();
CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
Flux<CosmosItemOperation> cosmosItemOperationFlux1 = Flux.range(0, totalRequest).map(i -> {
String partitionKey = UUID.randomUUID().toString();
TestDoc testDoc = this.populateTestDoc(partitionKey);
return CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey));
});
Flux<CosmosItemOperation> cosmosItemOperationFlux2 = Flux.range(0, totalRequest).map(i -> {
String partitionKey = UUID.randomUUID().toString();
EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey);
return CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey));
});
CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
Flux<CosmosBulkOperationResponse<AsyncCacheNonBlockingIntegrationTest>> responseFlux =
container.executeBulkOperations(cosmosItemOperationFlux1, cosmosBulkExecutionOptions);
AtomicInteger processedDoc = new AtomicInteger(0);
responseFlux
.flatMap(cosmosBulkOperationResponse -> {
processedDoc.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(totalRequest);
RxDocumentClientImpl rxDocumentClient =
(RxDocumentClientImpl) this.bulkClient.getDocClientWrapper();
ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient);
String cacheKeyBeforePartition = routingMap.keys().nextElement();
List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, this.bulkClient);
logger.info("Scaling up throughput for split");
ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
logger.info("Throughput replace request submitted for {} ",
throughputResponse.getProperties().getManualThroughput());
throughputResponse = container.readThroughput().block();
while (true) {
assert throughputResponse != null;
if (!throughputResponse.isReplacePending()) {
break;
}
logger.info("Waiting for split to complete");
Thread.sleep(10 * 1000);
throughputResponse = container.readThroughput().block();
}
List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId,
this.bulkClient);
assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size())
.as("Partition ranges should increase after split");
logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());
routingMap = getRoutingMap(rxDocumentClient);
String cacheKeyAfterPartition = routingMap.keys().nextElement();
assertThat(cacheKeyBeforePartition).isEqualTo(cacheKeyAfterPartition);
responseFlux = container.executeBulkOperations(cosmosItemOperationFlux2, cosmosBulkExecutionOptions);
AtomicInteger processedDoc2 = new AtomicInteger(0);
responseFlux
.flatMap(cosmosBulkOperationResponse -> {
processedDoc2.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(totalRequest);
container.delete().block();
} | class AsyncCacheNonBlockingIntegrationTest extends BatchTestBase {
private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlockingIntegrationTest.class);
private CosmosAsyncClient bulkClient;
private CosmosAsyncDatabase createdDatabase;
@Factory(dataProvider = "simpleClientBuilderGatewaySession")
public AsyncCacheNonBlockingIntegrationTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosBulkAsyncTest() {
assertThat(this.bulkClient).isNull();
this.bulkClient = getClientBuilder().buildAsyncClient();
createdDatabase = getSharedCosmosDatabase(this.bulkClient);
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeCloseAsync(this.bulkClient);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT * 200)
private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) {
RxPartitionKeyRangeCache partitionKeyRangeCache =
ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient);
AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache =
ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache);
return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache);
}
private List<PartitionKeyRange> getPartitionKeyRanges(
String containerId, CosmosAsyncClient asyncClient) {
List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(asyncClient);
List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
.readPartitionKeyRanges("/dbs/" + createdDatabase.getId()
+ "/colls/" + containerId,
new CosmosQueryRequestOptions())
.collectList().block();
partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
return partitionKeyRanges;
}
private int getTotalRequest() {
int countRequest = new Random().nextInt(100) + 200;
logger.info("Total count of request for this test case: " + countRequest);
return countRequest;
}
} | class AsyncCacheNonBlockingIntegrationTest extends BatchTestBase {
private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlockingIntegrationTest.class);
private CosmosAsyncClient bulkClient;
private CosmosAsyncDatabase createdDatabase;
@Factory(dataProvider = "simpleClientBuilderGatewaySession")
public AsyncCacheNonBlockingIntegrationTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosBulkAsyncTest() {
assertThat(this.bulkClient).isNull();
this.bulkClient = getClientBuilder().buildAsyncClient();
createdDatabase = getSharedCosmosDatabase(this.bulkClient);
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeCloseAsync(this.bulkClient);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT * 200)
private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) {
RxPartitionKeyRangeCache partitionKeyRangeCache =
ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient);
AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache =
ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache);
return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache);
}
private List<PartitionKeyRange> getPartitionKeyRanges(
String containerId, CosmosAsyncClient asyncClient) {
List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(asyncClient);
List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
.readPartitionKeyRanges("/dbs/" + createdDatabase.getId()
+ "/colls/" + containerId,
new CosmosQueryRequestOptions())
.collectList().block();
partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
return partitionKeyRanges;
}
private int getTotalRequest() {
int countRequest = new Random().nextInt(100) + 200;
logger.info("Total count of request for this test case: " + countRequest);
return countRequest;
}
} |
let `numberOfCacheRefreshes.incrementAndGet();` be part of the Mono, you will see the issue mentioned above about `I think we probably need to cache the result here, turning into a hot publisher, else for each subscribe, it will trigger a call to gateway?` | public void getAsync() {
AtomicInteger numberOfCacheRefreshes = new AtomicInteger(0);
final Function<Integer, Mono<Integer>> refreshFunc = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2);
};
AsyncCacheNonBlocking<Integer, Integer> cache = new AsyncCacheNonBlocking<>();
List<Mono<Integer>> tasks = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks.add(cache.getAsync(key, value -> refreshFunc.apply(key), false));
}
Flux<Integer> o = Flux.merge(tasks.stream().map(Mono::flux).collect(Collectors.toList()));
o.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(10);
assertThat(cache.getAsync(2, value -> refreshFunc.apply(2), false).block()).isEqualTo(4);
Function<Integer, Mono<Integer>> refreshFunc1 = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2 + 1);
};
List<Mono<Integer>> tasks1 = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks1.add(cache.getAsync(key, value -> refreshFunc1.apply(key), true));
}
Flux<Integer> o1 = Flux.merge(tasks1.stream().map(Mono::flux).collect(Collectors.toList()));
o1.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(20);
assertThat(cache.getAsync(2, value -> refreshFunc1.apply(2), false).block()).isEqualTo(5);
Function<Integer, Mono<Integer>> refreshFunc2 = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2 + 3);
};
List<Mono<Integer>> tasks2 = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks2.add(cache.getAsync(key, value -> refreshFunc2.apply(key), false));
}
Flux<Integer> o2 = Flux.merge(tasks2.stream().map(Mono::flux).collect(Collectors.toList()));
o2.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(20);
assertThat(cache.getAsync(2, value -> refreshFunc2.apply(2), false).block()).isEqualTo(5);
} | final Function<Integer, Mono<Integer>> refreshFunc = key -> { | public void getAsync() {
AtomicInteger numberOfCacheRefreshes = new AtomicInteger(0);
final Function<Integer, Mono<Integer>> refreshFunc = key -> {
return Mono.just(key * 2)
.doOnNext(t -> {
numberOfCacheRefreshes.incrementAndGet();
}).cache();
};
AsyncCacheNonBlocking<Integer, Integer> cache = new AsyncCacheNonBlocking<>();
List<Mono<Integer>> tasks = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks.add(cache.getAsync(key, value -> refreshFunc.apply(key), forceRefresh -> false));
}
Flux<Integer> o = Flux.merge(tasks.stream().map(Mono::flux).collect(Collectors.toList()));
o.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(10);
assertThat(cache.getAsync(2, value -> refreshFunc.apply(2), forceRefresh -> false).block()).isEqualTo(4);
Function<Integer, Mono<Integer>> refreshFunc1 = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2 + 1);
};
List<Mono<Integer>> tasks1 = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks1.add(cache.getAsync(key, value -> refreshFunc1.apply(key), forceRefresh -> true));
}
Flux<Integer> o1 = Flux.merge(tasks1.stream().map(Mono::flux).collect(Collectors.toList()));
o1.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(20);
assertThat(cache.getAsync(2, value -> refreshFunc1.apply(2), forceRefresh -> false).block()).isEqualTo(5);
Function<Integer, Mono<Integer>> refreshFunc2 = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2 + 3);
};
List<Mono<Integer>> tasks2 = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks2.add(cache.getAsync(key, value -> refreshFunc2.apply(key), forceRefresh -> false));
}
Flux<Integer> o2 = Flux.merge(tasks2.stream().map(Mono::flux).collect(Collectors.toList()));
o2.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(20);
assertThat(cache.getAsync(2, value -> refreshFunc2.apply(2), forceRefresh -> false).block()).isEqualTo(5);
} | class AsyncCacheNonBlockingTest {
private static final int TIMEOUT = 2000;
@Test(groups = {"unit"}, timeOut = TIMEOUT)
} | class AsyncCacheNonBlockingTest {
private static final int TIMEOUT = 2000;
@Test(groups = {"unit"}, timeOut = TIMEOUT)
} |
Added | public Mono<TValue> getValueAsync() {
Mono<TValue> valueMono = this.value;
if (valueMono != null) {
return valueMono;
}
valueLock.lock();
try {
if (this.value != null) {
return this.value;
}
this.value = this.createValueFunc.apply(null);
return this.value;
} finally {
valueLock.unlock();
}
} | valueLock.lock(); | public Mono<TValue> getValueAsync() {
return this.value.get();
} | class AsyncLazyWithRefresh<TValue> {
private final Function<TValue, Mono<TValue>> createValueFunc;
private final ReentrantLock valueLock = new ReentrantLock();
private final ReentrantLock removeFromCacheLock = new ReentrantLock();
private boolean removeFromCache = false;
private Mono<TValue> value;
private Mono<TValue> refreshInProgress;
public AsyncLazyWithRefresh(TValue value) {
this.createValueFunc = null;
this.value = Mono.just(value);
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Mono<TValue> value) {
this.createValueFunc = null;
this.value = value;
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.createValueFunc = taskFactory;
this.value = null;
this.refreshInProgress = null;
}
public Mono<TValue> value() {
return value;
}
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value;
AtomicReference<TValue> originalValue = new AtomicReference<>();
valueMono.flatMap(value -> {
originalValue.set(value);
return valueMono;
});
AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>();
valueLock.lock();
try {
this.refreshInProgress = createRefreshFunction.apply(originalValue.get());
refreshMono.set(this.refreshInProgress);
return refreshMono.get();
} finally {
valueLock.unlock();
}
}
public boolean shouldRemoveFromCache() {
if (this.removeFromCache) {
return false;
}
removeFromCacheLock.lock();
try {
if (this.removeFromCache) {
return false;
}
this.removeFromCache = true;
return true;
} finally {
removeFromCacheLock.unlock();
}
}
} | class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
} |
You are correct, fixed it. | public Mono<TValue> getValueAsync() {
Mono<TValue> valueMono = this.value;
if (valueMono != null) {
return valueMono;
}
valueLock.lock();
try {
if (this.value != null) {
return this.value;
}
this.value = this.createValueFunc.apply(null);
return this.value;
} finally {
valueLock.unlock();
}
} | this.value = this.createValueFunc.apply(null); | public Mono<TValue> getValueAsync() {
return this.value.get();
} | class AsyncLazyWithRefresh<TValue> {
private final Function<TValue, Mono<TValue>> createValueFunc;
private final ReentrantLock valueLock = new ReentrantLock();
private final ReentrantLock removeFromCacheLock = new ReentrantLock();
private boolean removeFromCache = false;
private Mono<TValue> value;
private Mono<TValue> refreshInProgress;
public AsyncLazyWithRefresh(TValue value) {
this.createValueFunc = null;
this.value = Mono.just(value);
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Mono<TValue> value) {
this.createValueFunc = null;
this.value = value;
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.createValueFunc = taskFactory;
this.value = null;
this.refreshInProgress = null;
}
public Mono<TValue> value() {
return value;
}
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value;
AtomicReference<TValue> originalValue = new AtomicReference<>();
valueMono.flatMap(value -> {
originalValue.set(value);
return valueMono;
});
AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>();
valueLock.lock();
try {
this.refreshInProgress = createRefreshFunction.apply(originalValue.get());
refreshMono.set(this.refreshInProgress);
return refreshMono.get();
} finally {
valueLock.unlock();
}
}
public boolean shouldRemoveFromCache() {
if (this.removeFromCache) {
return false;
}
removeFromCacheLock.lock();
try {
if (this.removeFromCache) {
return false;
}
this.removeFromCache = true;
return true;
} finally {
removeFromCacheLock.unlock();
}
}
} | class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
} |
Added. | public boolean shouldRemoveFromCache() {
if (this.removeFromCache) {
return false;
}
removeFromCacheLock.lock();
try {
if (this.removeFromCache) {
return false;
}
this.removeFromCache = true;
return true;
} finally {
removeFromCacheLock.unlock();
}
} | removeFromCacheLock.lock(); | public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
} | class AsyncLazyWithRefresh<TValue> {
private final Function<TValue, Mono<TValue>> createValueFunc;
private final ReentrantLock valueLock = new ReentrantLock();
private final ReentrantLock removeFromCacheLock = new ReentrantLock();
private boolean removeFromCache = false;
private Mono<TValue> value;
private Mono<TValue> refreshInProgress;
public AsyncLazyWithRefresh(TValue value) {
this.createValueFunc = null;
this.value = Mono.just(value);
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Mono<TValue> value) {
this.createValueFunc = null;
this.value = value;
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.createValueFunc = taskFactory;
this.value = null;
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
Mono<TValue> valueMono = this.value;
if (valueMono != null) {
return valueMono;
}
valueLock.lock();
try {
if (this.value != null) {
return this.value;
}
this.value = this.createValueFunc.apply(null);
return this.value;
} finally {
valueLock.unlock();
}
}
public Mono<TValue> value() {
return value;
}
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value;
AtomicReference<TValue> originalValue = new AtomicReference<>();
valueMono.flatMap(value -> {
originalValue.set(value);
return valueMono;
});
AtomicReference<Mono<TValue>> refreshMono = new AtomicReference<>();
valueLock.lock();
try {
this.refreshInProgress = createRefreshFunction.apply(originalValue.get());
refreshMono.set(this.refreshInProgress);
return refreshMono.get();
} finally {
valueLock.unlock();
}
}
} | class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
return this.value.get();
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
} |
Updated the logic to hot publisher. | public void getAsync() {
AtomicInteger numberOfCacheRefreshes = new AtomicInteger(0);
final Function<Integer, Mono<Integer>> refreshFunc = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2);
};
AsyncCacheNonBlocking<Integer, Integer> cache = new AsyncCacheNonBlocking<>();
List<Mono<Integer>> tasks = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks.add(cache.getAsync(key, value -> refreshFunc.apply(key), false));
}
Flux<Integer> o = Flux.merge(tasks.stream().map(Mono::flux).collect(Collectors.toList()));
o.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(10);
assertThat(cache.getAsync(2, value -> refreshFunc.apply(2), false).block()).isEqualTo(4);
Function<Integer, Mono<Integer>> refreshFunc1 = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2 + 1);
};
List<Mono<Integer>> tasks1 = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks1.add(cache.getAsync(key, value -> refreshFunc1.apply(key), true));
}
Flux<Integer> o1 = Flux.merge(tasks1.stream().map(Mono::flux).collect(Collectors.toList()));
o1.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(20);
assertThat(cache.getAsync(2, value -> refreshFunc1.apply(2), false).block()).isEqualTo(5);
Function<Integer, Mono<Integer>> refreshFunc2 = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2 + 3);
};
List<Mono<Integer>> tasks2 = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks2.add(cache.getAsync(key, value -> refreshFunc2.apply(key), false));
}
Flux<Integer> o2 = Flux.merge(tasks2.stream().map(Mono::flux).collect(Collectors.toList()));
o2.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(20);
assertThat(cache.getAsync(2, value -> refreshFunc2.apply(2), false).block()).isEqualTo(5);
} | final Function<Integer, Mono<Integer>> refreshFunc = key -> { | public void getAsync() {
AtomicInteger numberOfCacheRefreshes = new AtomicInteger(0);
final Function<Integer, Mono<Integer>> refreshFunc = key -> {
return Mono.just(key * 2)
.doOnNext(t -> {
numberOfCacheRefreshes.incrementAndGet();
}).cache();
};
AsyncCacheNonBlocking<Integer, Integer> cache = new AsyncCacheNonBlocking<>();
List<Mono<Integer>> tasks = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks.add(cache.getAsync(key, value -> refreshFunc.apply(key), forceRefresh -> false));
}
Flux<Integer> o = Flux.merge(tasks.stream().map(Mono::flux).collect(Collectors.toList()));
o.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(10);
assertThat(cache.getAsync(2, value -> refreshFunc.apply(2), forceRefresh -> false).block()).isEqualTo(4);
Function<Integer, Mono<Integer>> refreshFunc1 = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2 + 1);
};
List<Mono<Integer>> tasks1 = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks1.add(cache.getAsync(key, value -> refreshFunc1.apply(key), forceRefresh -> true));
}
Flux<Integer> o1 = Flux.merge(tasks1.stream().map(Mono::flux).collect(Collectors.toList()));
o1.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(20);
assertThat(cache.getAsync(2, value -> refreshFunc1.apply(2), forceRefresh -> false).block()).isEqualTo(5);
Function<Integer, Mono<Integer>> refreshFunc2 = key -> {
numberOfCacheRefreshes.incrementAndGet();
return Mono.just(key * 2 + 3);
};
List<Mono<Integer>> tasks2 = new ArrayList<>();
for (int j = 0; j < 10; j++) {
int key = j;
tasks2.add(cache.getAsync(key, value -> refreshFunc2.apply(key), forceRefresh -> false));
}
Flux<Integer> o2 = Flux.merge(tasks2.stream().map(Mono::flux).collect(Collectors.toList()));
o2.collectList().single().block();
assertThat(numberOfCacheRefreshes.get()).isEqualTo(20);
assertThat(cache.getAsync(2, value -> refreshFunc2.apply(2), forceRefresh -> false).block()).isEqualTo(5);
} | class AsyncCacheNonBlockingTest {
private static final int TIMEOUT = 2000;
@Test(groups = {"unit"}, timeOut = TIMEOUT)
} | class AsyncCacheNonBlockingTest {
private static final int TIMEOUT = 2000;
@Test(groups = {"unit"}, timeOut = TIMEOUT)
} |
we probably can start calling the function in the constructor (no need to wait until getValueAsync()), then there is no need to have a reference to the createValueFunc any more? ``` this.value = this.createValueFunc.apply(null).cache(); ``` | public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.createValueFunc = taskFactory;
this.value = new AtomicReference<>();
this.refreshInProgress = new AtomicReference<>();
} | this.value = new AtomicReference<>(); | public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
} | class AsyncLazyWithRefresh<TValue> {
private final Function<TValue, Mono<TValue>> createValueFunc;
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private AtomicReference<Mono<TValue>> value;
private final AtomicReference<Mono<TValue>> refreshInProgress;
public AsyncLazyWithRefresh(TValue value) {
this.createValueFunc = null;
if (this.value != null) {
this.value.set(Mono.just(value));
}
this.refreshInProgress = new AtomicReference<>();
}
public Mono<TValue> getValueAsync() {
this.value.compareAndSet(null, this.createValueFunc.apply(null));
return this.value.get().cache();
}
public Mono<TValue> value() {
return value.get();
}
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
AtomicReference<TValue> originalValue = new AtomicReference<>();
return valueMono.flatMap(value -> {
originalValue.set(value);
return valueMono;
}).flatMap(value -> {
if(this.refreshInProgress.compareAndSet(null, createRefreshFunction.apply(originalValue.get()))) {
return this.refreshInProgress.get().cache()
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgress.set(null);
return this.value.get();
});
}
return this.refreshInProgress.get();
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
} | class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
return this.value.get();
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
} |
Updated the code. | public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.createValueFunc = taskFactory;
this.value = new AtomicReference<>();
this.refreshInProgress = new AtomicReference<>();
} | this.value = new AtomicReference<>(); | public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
} | class AsyncLazyWithRefresh<TValue> {
private final Function<TValue, Mono<TValue>> createValueFunc;
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private AtomicReference<Mono<TValue>> value;
private final AtomicReference<Mono<TValue>> refreshInProgress;
public AsyncLazyWithRefresh(TValue value) {
this.createValueFunc = null;
if (this.value != null) {
this.value.set(Mono.just(value));
}
this.refreshInProgress = new AtomicReference<>();
}
public Mono<TValue> getValueAsync() {
this.value.compareAndSet(null, this.createValueFunc.apply(null));
return this.value.get().cache();
}
public Mono<TValue> value() {
return value.get();
}
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
AtomicReference<TValue> originalValue = new AtomicReference<>();
return valueMono.flatMap(value -> {
originalValue.set(value);
return valueMono;
}).flatMap(value -> {
if(this.refreshInProgress.compareAndSet(null, createRefreshFunction.apply(originalValue.get()))) {
return this.refreshInProgress.get().cache()
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgress.set(null);
return this.value.get();
});
}
return this.refreshInProgress.get();
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
} | class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
return this.value.get();
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
} |
We don't need this method. We can simplify this using the `Exceptions.java` class that we have under cosmos.implementation package, and use its pre-defined function below. ``` public static boolean isNotFound(CosmosException e) { return isStatusCode(e, HttpConstants.StatusCodes.NOTFOUND); } ``` https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Exceptions.java#L25 This will keep it simple, and also we don't need to use netty's `HttpResponseStatus` class | private Boolean removeNotFoundFromCacheException(CosmosException e) {
if (e.getStatusCode() == HttpResponseStatus.NOT_FOUND.code()) {
return true;
}
return false;
} | if (e.getStatusCode() == HttpResponseStatus.NOT_FOUND.code()) { | private Boolean removeNotFoundFromCacheException(CosmosException e) {
if (Exceptions.isNotFound(e)) {
return true;
}
return false;
} | class AsyncCacheNonBlocking<TKey, TValue> {
private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlocking.class);
private final ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values;
private AsyncCacheNonBlocking(ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values) {
this.values = values;
}
public AsyncCacheNonBlocking() {
this(new ConcurrentHashMap<>());
}
/**
*
* <p>
* If another initialization function is already running, new initialization function will not be started.
* The result will be result of currently running initialization function.
* </p>
*
* <p>
* If previous initialization function is successfully completed it will return the value. It is possible this
* value is stale and will only be updated after the force refresh task is complete.
* Force refresh is true:
* If the key does not exist: It will create and await the new task
* If the key exists and the current task is still running: It will return the existing task
* If the key exists and the current task is already done: It will start a new task to get the updated values.
* Once the refresh task is complete it will be returned to caller.
* If it is a success the value in the cache will be updated. If the refresh task throws an exception the key will be removed from the cache.
* </p>
*
* <p>
* If previous initialization function failed - new one will be launched.
* </p>
*
* @param key Key for which to get a value.
* @param singleValueInitFunc Initialization function.
* @param forceRefresh Force refresh for refreshing the cache
* @return Cached value or value returned by initialization function.
*/
public Mono<TValue> getAsync(
TKey key,
Function<TValue, Mono<TValue>> singleValueInitFunc,
Function<TValue, Boolean> forceRefresh) {
AsyncLazyWithRefresh<TValue> initialLazyValue = values.get(key);
if (initialLazyValue != null) {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] exists", key);
}
return initialLazyValue.getValueAsync().flatMap(value -> {
if(!forceRefresh.apply(value)) {
return Mono.just(value);
}
Mono<TValue> refreshMono = initialLazyValue.createAndWaitForBackgroundRefreshTaskAsync(key, singleValueInitFunc);
return refreshMono.onErrorResume(
(exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("refresh cache [{}] resulted in error", key, exception);
}
if (initialLazyValue.shouldRemoveFromCache()) {
if (removeNotFoundFromCacheException((CosmosException)exception)) {
this.remove(key);
}
}
return Mono.error(exception);
}
);
}).onErrorResume((exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] resulted in error", key, exception);
}
if (initialLazyValue.shouldRemoveFromCache()) {
this.remove(key);
}
return Mono.error(exception);
});
}
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] doesn't exist, computing new value", key);
}
AsyncLazyWithRefresh<TValue> asyncLazyWithRefresh = new AsyncLazyWithRefresh<TValue>(singleValueInitFunc);
this.values.putIfAbsent(key, asyncLazyWithRefresh);
AsyncLazyWithRefresh<TValue> result = this.values.get(key);
return result.getValueAsync().onErrorResume(
(exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] resulted in error", key, exception);
}
if (result.shouldRemoveFromCache()) {
this.remove(key);
}
return Mono.error(exception);
}
);
}
public void set(TKey key, TValue value) {
if (logger.isDebugEnabled()) {
logger.debug("set cache[{}]={}", key, value);
}
AsyncLazyWithRefresh<TValue> updatedValue = new AsyncLazyWithRefresh<TValue>(value);
this.values.put(key, updatedValue);
}
public void remove(TKey key) {
values.remove(key);
}
/**
* This is AsyncLazy that has an additional Task that can
* be used to update the value. This allows concurrent requests
* to use the stale value while the refresh is occurring.
*/
private class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
return this.value.get();
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
});
}
return this.refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
}
} | class AsyncCacheNonBlocking<TKey, TValue> {
private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlocking.class);
private final ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values;
public AsyncCacheNonBlocking() {
this.values = new ConcurrentHashMap<>();
}
/**
*
* <p>
* If another initialization function is already running, new initialization function will not be started.
* The result will be result of currently running initialization function.
* </p>
*
* <p>
* If previous initialization function is successfully completed it will return the value. It is possible this
* value is stale and will only be updated after the force refresh task is complete.
* Force refresh is true:
* If the key does not exist: It will create and await the new task
* If the key exists and the current task is still running: It will return the existing task
* If the key exists and the current task is already done: It will start a new task to get the updated values.
* Once the refresh task is complete it will be returned to caller.
* If it is a success the value in the cache will be updated. If the refresh task throws an exception the key will be removed from the cache.
* </p>
*
* <p>
* If previous initialization function failed - new one will be launched.
* </p>
*
* @param key Key for which to get a value.
* @param singleValueInitFunc Initialization function.
* @param forceRefresh Force refresh for refreshing the cache
* @return Cached value or value returned by initialization function.
*/
public Mono<TValue> getAsync(
TKey key,
Function<TValue, Mono<TValue>> singleValueInitFunc,
Function<TValue, Boolean> forceRefresh) {
AsyncLazyWithRefresh<TValue> initialLazyValue = values.get(key);
if (initialLazyValue != null) {
logger.debug("cache[{}] exists", key);
return initialLazyValue.getValueAsync().flatMap(value -> {
if(!forceRefresh.apply(value)) {
return Mono.just(value);
}
Mono<TValue> refreshMono = initialLazyValue.createAndWaitForBackgroundRefreshTaskAsync(key, singleValueInitFunc);
return refreshMono.onErrorResume(
(exception) -> {
logger.debug("refresh cache [{}] resulted in error", key, exception);
if (initialLazyValue.shouldRemoveFromCache()) {
if (removeNotFoundFromCacheException((CosmosException)exception)) {
this.remove(key);
}
}
return Mono.error(exception);
}
);
}).onErrorResume((exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] resulted in error", key, exception);
}
if (initialLazyValue.shouldRemoveFromCache()) {
this.remove(key);
}
return Mono.error(exception);
});
}
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] doesn't exist, computing new value", key);
}
AsyncLazyWithRefresh<TValue> asyncLazyWithRefresh = new AsyncLazyWithRefresh<TValue>(singleValueInitFunc);
AsyncLazyWithRefresh<TValue> preResult = this.values.putIfAbsent(key, asyncLazyWithRefresh);
if (preResult == null) {
preResult = asyncLazyWithRefresh;
}
AsyncLazyWithRefresh<TValue> result = preResult;
return result.getValueAsync().onErrorResume(
(exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] resulted in error", key, exception);
}
if (result.shouldRemoveFromCache()) {
this.remove(key);
}
return Mono.error(exception);
}
);
}
public void set(TKey key, TValue value) {
if (logger.isDebugEnabled()) {
logger.debug("set cache[{}]={}", key, value);
}
AsyncLazyWithRefresh<TValue> updatedValue = new AsyncLazyWithRefresh<TValue>(value);
this.values.put(key, updatedValue);
}
public void remove(TKey key) {
values.remove(key);
}
/**
* This is AsyncLazy that has an additional Task that can
* be used to update the value. This allows concurrent requests
* to use the stale value while the refresh is occurring.
*/
private class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
return this.value.get();
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
}
} |
Thanks for pointing this out. Fixed. | private Boolean removeNotFoundFromCacheException(CosmosException e) {
if (e.getStatusCode() == HttpResponseStatus.NOT_FOUND.code()) {
return true;
}
return false;
} | if (e.getStatusCode() == HttpResponseStatus.NOT_FOUND.code()) { | private Boolean removeNotFoundFromCacheException(CosmosException e) {
if (Exceptions.isNotFound(e)) {
return true;
}
return false;
} | class AsyncCacheNonBlocking<TKey, TValue> {
private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlocking.class);
private final ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values;
private AsyncCacheNonBlocking(ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values) {
this.values = values;
}
public AsyncCacheNonBlocking() {
this(new ConcurrentHashMap<>());
}
/**
*
* <p>
* If another initialization function is already running, new initialization function will not be started.
* The result will be result of currently running initialization function.
* </p>
*
* <p>
* If previous initialization function is successfully completed it will return the value. It is possible this
* value is stale and will only be updated after the force refresh task is complete.
* Force refresh is true:
* If the key does not exist: It will create and await the new task
* If the key exists and the current task is still running: It will return the existing task
* If the key exists and the current task is already done: It will start a new task to get the updated values.
* Once the refresh task is complete it will be returned to caller.
* If it is a success the value in the cache will be updated. If the refresh task throws an exception the key will be removed from the cache.
* </p>
*
* <p>
* If previous initialization function failed - new one will be launched.
* </p>
*
* @param key Key for which to get a value.
* @param singleValueInitFunc Initialization function.
* @param forceRefresh Force refresh for refreshing the cache
* @return Cached value or value returned by initialization function.
*/
public Mono<TValue> getAsync(
TKey key,
Function<TValue, Mono<TValue>> singleValueInitFunc,
Function<TValue, Boolean> forceRefresh) {
AsyncLazyWithRefresh<TValue> initialLazyValue = values.get(key);
if (initialLazyValue != null) {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] exists", key);
}
return initialLazyValue.getValueAsync().flatMap(value -> {
if(!forceRefresh.apply(value)) {
return Mono.just(value);
}
Mono<TValue> refreshMono = initialLazyValue.createAndWaitForBackgroundRefreshTaskAsync(key, singleValueInitFunc);
return refreshMono.onErrorResume(
(exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("refresh cache [{}] resulted in error", key, exception);
}
if (initialLazyValue.shouldRemoveFromCache()) {
if (removeNotFoundFromCacheException((CosmosException)exception)) {
this.remove(key);
}
}
return Mono.error(exception);
}
);
}).onErrorResume((exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] resulted in error", key, exception);
}
if (initialLazyValue.shouldRemoveFromCache()) {
this.remove(key);
}
return Mono.error(exception);
});
}
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] doesn't exist, computing new value", key);
}
AsyncLazyWithRefresh<TValue> asyncLazyWithRefresh = new AsyncLazyWithRefresh<TValue>(singleValueInitFunc);
this.values.putIfAbsent(key, asyncLazyWithRefresh);
AsyncLazyWithRefresh<TValue> result = this.values.get(key);
return result.getValueAsync().onErrorResume(
(exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] resulted in error", key, exception);
}
if (result.shouldRemoveFromCache()) {
this.remove(key);
}
return Mono.error(exception);
}
);
}
public void set(TKey key, TValue value) {
if (logger.isDebugEnabled()) {
logger.debug("set cache[{}]={}", key, value);
}
AsyncLazyWithRefresh<TValue> updatedValue = new AsyncLazyWithRefresh<TValue>(value);
this.values.put(key, updatedValue);
}
public void remove(TKey key) {
values.remove(key);
}
/**
* This is AsyncLazy that has an additional Task that can
* be used to update the value. This allows concurrent requests
* to use the stale value while the refresh is occurring.
*/
private class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
return this.value.get();
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
});
}
return this.refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
}
} | class AsyncCacheNonBlocking<TKey, TValue> {
private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlocking.class);
private final ConcurrentHashMap<TKey, AsyncLazyWithRefresh<TValue>> values;
public AsyncCacheNonBlocking() {
this.values = new ConcurrentHashMap<>();
}
/**
*
* <p>
* If another initialization function is already running, new initialization function will not be started.
* The result will be result of currently running initialization function.
* </p>
*
* <p>
* If previous initialization function is successfully completed it will return the value. It is possible this
* value is stale and will only be updated after the force refresh task is complete.
* Force refresh is true:
* If the key does not exist: It will create and await the new task
* If the key exists and the current task is still running: It will return the existing task
* If the key exists and the current task is already done: It will start a new task to get the updated values.
* Once the refresh task is complete it will be returned to caller.
* If it is a success the value in the cache will be updated. If the refresh task throws an exception the key will be removed from the cache.
* </p>
*
* <p>
* If previous initialization function failed - new one will be launched.
* </p>
*
* @param key Key for which to get a value.
* @param singleValueInitFunc Initialization function.
* @param forceRefresh Force refresh for refreshing the cache
* @return Cached value or value returned by initialization function.
*/
public Mono<TValue> getAsync(
TKey key,
Function<TValue, Mono<TValue>> singleValueInitFunc,
Function<TValue, Boolean> forceRefresh) {
AsyncLazyWithRefresh<TValue> initialLazyValue = values.get(key);
if (initialLazyValue != null) {
logger.debug("cache[{}] exists", key);
return initialLazyValue.getValueAsync().flatMap(value -> {
if(!forceRefresh.apply(value)) {
return Mono.just(value);
}
Mono<TValue> refreshMono = initialLazyValue.createAndWaitForBackgroundRefreshTaskAsync(key, singleValueInitFunc);
return refreshMono.onErrorResume(
(exception) -> {
logger.debug("refresh cache [{}] resulted in error", key, exception);
if (initialLazyValue.shouldRemoveFromCache()) {
if (removeNotFoundFromCacheException((CosmosException)exception)) {
this.remove(key);
}
}
return Mono.error(exception);
}
);
}).onErrorResume((exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] resulted in error", key, exception);
}
if (initialLazyValue.shouldRemoveFromCache()) {
this.remove(key);
}
return Mono.error(exception);
});
}
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] doesn't exist, computing new value", key);
}
AsyncLazyWithRefresh<TValue> asyncLazyWithRefresh = new AsyncLazyWithRefresh<TValue>(singleValueInitFunc);
AsyncLazyWithRefresh<TValue> preResult = this.values.putIfAbsent(key, asyncLazyWithRefresh);
if (preResult == null) {
preResult = asyncLazyWithRefresh;
}
AsyncLazyWithRefresh<TValue> result = preResult;
return result.getValueAsync().onErrorResume(
(exception) -> {
if (logger.isDebugEnabled()) {
logger.debug("cache[{}] resulted in error", key, exception);
}
if (result.shouldRemoveFromCache()) {
this.remove(key);
}
return Mono.error(exception);
}
);
}
public void set(TKey key, TValue value) {
if (logger.isDebugEnabled()) {
logger.debug("set cache[{}]={}", key, value);
}
AsyncLazyWithRefresh<TValue> updatedValue = new AsyncLazyWithRefresh<TValue>(value);
this.values.put(key, updatedValue);
}
public void remove(TKey key) {
values.remove(key);
}
/**
* This is AsyncLazy that has an additional Task that can
* be used to update the value. This allows concurrent requests
* to use the stale value while the refresh is occurring.
*/
private class AsyncLazyWithRefresh<TValue> {
private final AtomicBoolean removeFromCache = new AtomicBoolean(false);
private final AtomicReference<Mono<TValue>> value;
private Mono<TValue> refreshInProgress;
private final AtomicBoolean refreshInProgressCompleted = new AtomicBoolean(false);
public AsyncLazyWithRefresh(TValue value) {
this.value = new AtomicReference<>();
this.value.set(Mono.just(value));
this.refreshInProgress = null;
}
public AsyncLazyWithRefresh(Function<TValue, Mono<TValue>> taskFactory) {
this.value = new AtomicReference<>();
this.value.set(taskFactory.apply(null).cache());
this.refreshInProgress = null;
}
public Mono<TValue> getValueAsync() {
return this.value.get();
}
public Mono<TValue> value() {
return value.get();
}
@SuppressWarnings("unchecked")
public Mono<TValue> createAndWaitForBackgroundRefreshTaskAsync(TKey key, Function<TValue, Mono<TValue>> createRefreshFunction) {
Mono<TValue> valueMono = this.value.get();
return valueMono.flatMap(value -> {
if(this.refreshInProgressCompleted.compareAndSet(false, true)) {
this.refreshInProgress = createRefreshFunction.apply(value).cache();
return this.refreshInProgress
.flatMap(response -> {
this.value.set(Mono.just(response));
this.refreshInProgressCompleted.set(false);
return this.value.get();
}).doOnError(e -> this.refreshInProgressCompleted.set(false));
}
return this.refreshInProgress == null ? valueMono : refreshInProgress;
});
}
public boolean shouldRemoveFromCache() {
return this.removeFromCache.compareAndSet(false, true);
}
}
} |
Added | public void createItem_withCacheRefresh() throws InterruptedException {
String containerId = "bulksplittestcontainer_" + UUID.randomUUID();
int totalRequest = getTotalRequest();
CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block();
CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
Flux<CosmosItemOperation> cosmosItemOperationFlux1 = Flux.range(0, totalRequest).map(i -> {
String partitionKey = UUID.randomUUID().toString();
TestDoc testDoc = this.populateTestDoc(partitionKey);
return CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey));
});
Flux<CosmosItemOperation> cosmosItemOperationFlux2 = Flux.range(0, totalRequest).map(i -> {
String partitionKey = UUID.randomUUID().toString();
EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey);
return CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey));
});
CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
Flux<CosmosBulkOperationResponse<AsyncCacheNonBlockingIntegrationTest>> responseFlux =
container.executeBulkOperations(cosmosItemOperationFlux1, cosmosBulkExecutionOptions);
AtomicInteger processedDoc = new AtomicInteger(0);
responseFlux
.flatMap(cosmosBulkOperationResponse -> {
processedDoc.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(totalRequest);
RxDocumentClientImpl rxDocumentClient =
(RxDocumentClientImpl) this.bulkClient.getDocClientWrapper();
ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient);
String cacheKeyBeforePartition = routingMap.keys().nextElement();
List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, this.bulkClient);
logger.info("Scaling up throughput for split");
ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
logger.info("Throughput replace request submitted for {} ",
throughputResponse.getProperties().getManualThroughput());
throughputResponse = container.readThroughput().block();
while (true) {
assert throughputResponse != null;
if (!throughputResponse.isReplacePending()) {
break;
}
logger.info("Waiting for split to complete");
Thread.sleep(10 * 1000);
throughputResponse = container.readThroughput().block();
}
List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId,
this.bulkClient);
assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size())
.as("Partition ranges should increase after split");
logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());
routingMap = getRoutingMap(rxDocumentClient);
String cacheKeyAfterPartition = routingMap.keys().nextElement();
assertThat(cacheKeyBeforePartition).isEqualTo(cacheKeyAfterPartition);
responseFlux = container.executeBulkOperations(cosmosItemOperationFlux2, cosmosBulkExecutionOptions);
AtomicInteger processedDoc2 = new AtomicInteger(0);
responseFlux
.flatMap(cosmosBulkOperationResponse -> {
processedDoc2.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(totalRequest);
container.delete().block();
} | assertThat(processedDoc.get()).isEqualTo(totalRequest); | public void createItem_withCacheRefresh() throws InterruptedException {
String containerId = "bulksplittestcontainer_" + UUID.randomUUID();
int totalRequest = getTotalRequest();
CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block();
CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
Flux<CosmosItemOperation> cosmosItemOperationFlux1 = Flux.range(0, totalRequest).map(i -> {
String partitionKey = UUID.randomUUID().toString();
TestDoc testDoc = this.populateTestDoc(partitionKey);
return CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey));
});
Flux<CosmosItemOperation> cosmosItemOperationFlux2 = Flux.range(0, totalRequest).map(i -> {
String partitionKey = UUID.randomUUID().toString();
EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey);
return CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey));
});
CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
Flux<CosmosBulkOperationResponse<AsyncCacheNonBlockingIntegrationTest>> responseFlux =
container.executeBulkOperations(cosmosItemOperationFlux1, cosmosBulkExecutionOptions);
AtomicInteger processedDoc = new AtomicInteger(0);
responseFlux
.flatMap(cosmosBulkOperationResponse -> {
processedDoc.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(totalRequest);
RxDocumentClientImpl rxDocumentClient =
(RxDocumentClientImpl) this.bulkClient.getDocClientWrapper();
ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient);
String cacheKeyBeforePartition = routingMap.keys().nextElement();
List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, this.bulkClient);
logger.info("Scaling up throughput for split");
ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
logger.info("Throughput replace request submitted for {} ",
throughputResponse.getProperties().getManualThroughput());
throughputResponse = container.readThroughput().block();
while (true) {
assert throughputResponse != null;
if (!throughputResponse.isReplacePending()) {
break;
}
logger.info("Waiting for split to complete");
Thread.sleep(10 * 1000);
throughputResponse = container.readThroughput().block();
}
List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId,
this.bulkClient);
assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size())
.as("Partition ranges should increase after split");
logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());
routingMap = getRoutingMap(rxDocumentClient);
String cacheKeyAfterPartition = routingMap.keys().nextElement();
assertThat(cacheKeyBeforePartition).isEqualTo(cacheKeyAfterPartition);
responseFlux = container.executeBulkOperations(cosmosItemOperationFlux2, cosmosBulkExecutionOptions);
AtomicInteger processedDoc2 = new AtomicInteger(0);
responseFlux
.flatMap(cosmosBulkOperationResponse -> {
processedDoc2.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(totalRequest);
container.delete().block();
} | class AsyncCacheNonBlockingIntegrationTest extends BatchTestBase {
private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlockingIntegrationTest.class);
private CosmosAsyncClient bulkClient;
private CosmosAsyncDatabase createdDatabase;
@Factory(dataProvider = "simpleClientBuilderGatewaySession")
public AsyncCacheNonBlockingIntegrationTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosBulkAsyncTest() {
assertThat(this.bulkClient).isNull();
this.bulkClient = getClientBuilder().buildAsyncClient();
createdDatabase = getSharedCosmosDatabase(this.bulkClient);
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeCloseAsync(this.bulkClient);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT * 200)
private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) {
RxPartitionKeyRangeCache partitionKeyRangeCache =
ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient);
AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache =
ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache);
return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache);
}
private List<PartitionKeyRange> getPartitionKeyRanges(
String containerId, CosmosAsyncClient asyncClient) {
List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(asyncClient);
List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
.readPartitionKeyRanges("/dbs/" + createdDatabase.getId()
+ "/colls/" + containerId,
new CosmosQueryRequestOptions())
.collectList().block();
partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
return partitionKeyRanges;
}
private int getTotalRequest() {
int countRequest = new Random().nextInt(100) + 200;
logger.info("Total count of request for this test case: " + countRequest);
return countRequest;
}
} | class AsyncCacheNonBlockingIntegrationTest extends BatchTestBase {
private final static Logger logger = LoggerFactory.getLogger(AsyncCacheNonBlockingIntegrationTest.class);
private CosmosAsyncClient bulkClient;
private CosmosAsyncDatabase createdDatabase;
@Factory(dataProvider = "simpleClientBuilderGatewaySession")
public AsyncCacheNonBlockingIntegrationTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosBulkAsyncTest() {
assertThat(this.bulkClient).isNull();
this.bulkClient = getClientBuilder().buildAsyncClient();
createdDatabase = getSharedCosmosDatabase(this.bulkClient);
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeCloseAsync(this.bulkClient);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT * 200)
private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) {
RxPartitionKeyRangeCache partitionKeyRangeCache =
ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient);
AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache =
ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache);
return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache);
}
private List<PartitionKeyRange> getPartitionKeyRanges(
String containerId, CosmosAsyncClient asyncClient) {
List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(asyncClient);
List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
.readPartitionKeyRanges("/dbs/" + createdDatabase.getId()
+ "/colls/" + containerId,
new CosmosQueryRequestOptions())
.collectList().block();
partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
return partitionKeyRanges;
}
private int getTotalRequest() {
int countRequest = new Random().nextInt(100) + 200;
logger.info("Total count of request for this test case: " + countRequest);
return countRequest;
}
} |
I no longer have the background why we used `doPrivileged`. But directly call `accessibleObject.setAccessible(true)` here would result in StackOverflowException on javac (build), at `com.sun.tools.javac.comp.Resolve.isAccessible`, which seems to be a javac bug. | private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
} | } | private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
} | class ResourceManagerTestBase extends TestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "http:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
};
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestBase.class);
private AzureProfile testProfile;
private AuthFile testAuthFile;
private boolean isSkipInPlayback;
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
/**
* Loads a credential from file.
*
* @return A credential loaded from a file.
*/
protected TokenCredential credentialFromFile() {
return testAuthFile.getCredential();
}
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
String clientId = testAuthFile == null ? null : testAuthFile.getClientId();
return testResourceNamer.recordValueFromConfig(clientId);
}
/**
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
Map<String, String> textReplacementRules = new HashMap<>();
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
if (interceptorManager.getRecordedData() == null) {
skipInPlayback();
return;
}
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new TextReplacementPolicy(interceptorManager.getRecordedData(), textReplacementRules));
httpPipeline = buildHttpPipeline(
null,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
textReplacementRules.put(PLAYBACK_URI_BASE + "1234", PLAYBACK_URI);
addTextReplacementRules(textReplacementRules);
} else {
if (System.getenv(AZURE_AUTH_LOCATION) != null) {
final File credFile = new File(System.getenv(AZURE_AUTH_LOCATION));
try {
testAuthFile = AuthFile.parse(credFile);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("Cannot parse auth file. Please check file format.", e));
}
credential = testAuthFile.getCredential();
testProfile = new AzureProfile(testAuthFile.getTenantId(), testAuthFile.getSubscriptionId(), testAuthFile.getEnvironment());
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID);
String clientSecret = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
if (clientId == null || tenantId == null || clientSecret == null || subscriptionId == null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("When running tests in record mode either 'AZURE_AUTH_LOCATION' or 'AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET and AZURE_SUBSCRIPTION_ID' needs to be set"));
}
credential = new ClientSecretCredentialBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new TimeoutPolicy(Duration.ofMinutes(1)));
if (!interceptorManager.isLiveMode() && !testContextManager.doNotRecordTest()) {
policies.add(new TextReplacementPolicy(interceptorManager.getRecordedData(), textReplacementRules));
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
textReplacementRules.put(testProfile.getSubscriptionId(), ZERO_SUBSCRIPTION);
textReplacementRules.put(testProfile.getTenantId(), ZERO_TENANT);
textReplacementRules.put(Pattern.quote(AzureEnvironment.AZURE.getResourceManagerEndpoint()), PLAYBACK_URI + "/");
textReplacementRules.put(Pattern.quote(AzureEnvironment.AZURE.getMicrosoftGraphEndpoint()), PLAYBACK_URI + "/");
textReplacementRules.put("https:
textReplacementRules.put("https:
addTextReplacementRules(textReplacementRules);
}
initializeClients(httpPipeline, testProfile);
}
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException e) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
private void addTextReplacementRules(Map<String, String> rules) {
for (Map.Entry<String, String> entry : rules.entrySet()) {
interceptorManager.addTextReplacementRule(entry.getKey(), entry.getValue());
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (NoSuchMethodException
| IllegalAccessException
| InstantiationException
| InvocationTargetException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
} | class ResourceManagerTestBase extends TestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "http:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
};
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestBase.class);
private AzureProfile testProfile;
private AuthFile testAuthFile;
private boolean isSkipInPlayback;
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
/**
* Loads a credential from file.
*
* @return A credential loaded from a file.
*/
protected TokenCredential credentialFromFile() {
return testAuthFile.getCredential();
}
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
String clientId = testAuthFile == null ? null : testAuthFile.getClientId();
return testResourceNamer.recordValueFromConfig(clientId);
}
/**
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
Map<String, String> textReplacementRules = new HashMap<>();
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
if (interceptorManager.getRecordedData() == null) {
skipInPlayback();
return;
}
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new TextReplacementPolicy(interceptorManager.getRecordedData(), textReplacementRules));
httpPipeline = buildHttpPipeline(
null,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
textReplacementRules.put(PLAYBACK_URI_BASE + "1234", PLAYBACK_URI);
addTextReplacementRules(textReplacementRules);
} else {
if (System.getenv(AZURE_AUTH_LOCATION) != null) {
final File credFile = new File(System.getenv(AZURE_AUTH_LOCATION));
try {
testAuthFile = AuthFile.parse(credFile);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("Cannot parse auth file. Please check file format.", e));
}
credential = testAuthFile.getCredential();
testProfile = new AzureProfile(testAuthFile.getTenantId(), testAuthFile.getSubscriptionId(), testAuthFile.getEnvironment());
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID);
String clientSecret = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
if (clientId == null || tenantId == null || clientSecret == null || subscriptionId == null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("When running tests in record mode either 'AZURE_AUTH_LOCATION' or 'AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET and AZURE_SUBSCRIPTION_ID' needs to be set"));
}
credential = new ClientSecretCredentialBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientSecret(clientSecret)
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new TimeoutPolicy(Duration.ofMinutes(1)));
if (!interceptorManager.isLiveMode() && !testContextManager.doNotRecordTest()) {
policies.add(new TextReplacementPolicy(interceptorManager.getRecordedData(), textReplacementRules));
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
textReplacementRules.put(testProfile.getSubscriptionId(), ZERO_SUBSCRIPTION);
textReplacementRules.put(testProfile.getTenantId(), ZERO_TENANT);
textReplacementRules.put(Pattern.quote(AzureEnvironment.AZURE.getResourceManagerEndpoint()), PLAYBACK_URI + "/");
textReplacementRules.put(Pattern.quote(AzureEnvironment.AZURE.getMicrosoftGraphEndpoint()), PLAYBACK_URI + "/");
textReplacementRules.put("https:
textReplacementRules.put("https:
addTextReplacementRules(textReplacementRules);
}
initializeClients(httpPipeline, testProfile);
}
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException e) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
private void addTextReplacementRules(Map<String, String> rules) {
for (Map.Entry<String, String> entry : rules.entrySet()) {
interceptorManager.addTextReplacementRule(entry.getKey(), entry.getValue());
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (NoSuchMethodException
| IllegalAccessException
| InstantiationException
| InvocationTargetException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
} |
follow convention in the file, i.e. `String.format` etc. | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(customEndpointAddress + " : is not a valid URL,", e));
}
return this;
} | new IllegalArgumentException(customEndpointAddress + " : is not a valid URL,", e)); | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
}
return this;
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
private static final AmqpRetryOptions DEFAULT_RETRY =
new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
private static final int DEFAULT_PREFETCH_COUNT = 0;
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
private ClientOptions clientOptions;
private Configuration configuration;
private ServiceBusConnectionProcessor sharedConnection;
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
* Keeps track of the open clients that were created from this builder when there is a shared connection.
*/
private final AtomicInteger openClients = new AtomicInteger();
/**
* Creates a new instance with the default transport {@link AmqpTransportType
*/
public ServiceBusClientBuilder() {
}
/**
* Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
* certain properties, as well as support the addition of custom header information. Refer to the {@link
* ClientOptions} documentation for more information.
*
* @param clientOptions to be set on the client.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the fully-qualified namespace for the Service Bus.
*
* @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return this;
}
private String getAndValidateFullyQualifiedNamespace() {
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return fullyQualifiedNamespace;
}
/**
* Sets a custom endpoint address when connecting to the Event Hubs service. This can be useful when your network
* does not allow connecting to the standard Azure Event Hubs endpoint address, but does allow connecting through
* an intermediary. For example: {@literal https:
* <p>
* If no port is specified, the default port for the {@link
* used.
*
* @param customEndpointAddress The custom endpoint address.
* @return The updated {@link ServiceBusClientBuilder} object.
* @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
*/
/**
 * Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder connectionString(String connectionString) {
    final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);

    // Build the shared-key credential from the parsed connection string up-front so a malformed
    // key surfaces as a single, descriptive failure.
    final TokenCredential parsedCredential;
    try {
        parsedCredential = getTokenCredential(properties);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
    }

    final String host = properties.getEndpoint().getHost();
    this.fullyQualifiedNamespace = host;

    // A connection string may optionally pin a single entity (queue/topic) via EntityPath.
    final String entityPath = properties.getEntityPath();
    if (!CoreUtils.isNullOrEmpty(entityPath)) {
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Setting entity from connection string.");
        this.connectionStringEntityName = entityPath;
    }

    return credential(host, parsedCredential);
}
/**
 * Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
 * scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
 * one 'send-via' entity on server side as explained next.
 * Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
 * entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
 * perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
 * their first operation need to either be senders, or if they are receivers they need to be on the same entity as
 * the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
 * that the transaction is committed because it cannot route a receive operation through a different entity). For
 * instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
 * cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
 * first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
 *
 * <p><strong>Avoid using non-transaction API on this client</strong></p>
 * Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
 * have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
 * entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
 *
 * <p><strong>When not to enable this feature</strong></p>
 * If your transaction is involved in one Service bus entity only. For example you are receiving from one
 * queue/subscription and you want to settle your own messages which are part of one transaction.
 * See the Azure Service Bus transactions documentation for further details.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder enableCrossEntityTransactions() {
    this.crossEntityTransactions = true;
    return this;
}
/**
 * Builds the shared-key credential from a parsed connection string. A connection string carries
 * either a pre-built shared access signature, or a key-name/key pair from which tokens are minted.
 */
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
    if (properties.getSharedAccessSignature() != null) {
        return new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
    }
    return new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
        properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY);
}
/**
 * Sets the configuration store that is used during construction of the service client.
 *
 * If not specified, the default (global) configuration store is used to configure Service Bus clients.
 *
 * @param configuration The configuration store used to configure Service Bus clients.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}
/**
 * Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
 * The azure-identity library has multiple {@link TokenCredential} implementations that can be used to
 * authenticate the access to the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is {@code null}.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after all validation has passed so a failed call leaves the builder unchanged.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = credential;
    return this;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * identity and authentication documentation for more details on proper usage of the {@link TokenCredential} type.
 *
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
    this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link AzureNamedKeyCredential#update(String, String)} allow you to update the name and key.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is {@code null}.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after all validation has passed so a failed call leaves the builder unchanged.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    // Adapt the named key into the SAS-token credential the AMQP layer understands.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link AzureNamedKeyCredential#update(String, String)} allow you to update the name and key.
 *
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Adapt the named key into the SAS-token credential the AMQP layer understands.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to the Azure documentation on Service Bus access control with Shared Access Signatures.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is {@code null}.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after all validation has passed so a failed call leaves the builder unchanged.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    // Wrap the raw SAS token so it can be presented over the AMQP CBS node.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to the Azure documentation on Service Bus access control with Shared Access Signatures.
 *
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Wrap the raw SAS token so it can be presented over the AMQP CBS node.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured,
 * the web-sockets {@link AmqpTransportType} must be used for the transport type.
 *
 * @param proxyOptions The proxy configuration to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
    this.proxyOptions = proxyOptions;
    return this;
}

/**
 * Package-private method that sets the verify mode for this connection.
 *
 * @param verifyMode The verification mode.
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
    this.verifyMode = verifyMode;
    return this;
}
/**
 * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
 *
 * @param retryOptions The retry options to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}

/**
 * Package-private setter for the scheduler backing asynchronous operations. If unset, a default
 * scheduler is chosen when the shared connection is created.
 *
 * @param scheduler Scheduler to be used.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
    this.scheduler = scheduler;
    return this;
}

/**
 * Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is
 * plain AMQP (see {@link AmqpTransportType}).
 *
 * @param transportType The transport type to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
    this.transport = transportType;
    return this;
}
/**
 * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
 *
 * @return A new instance of {@link ServiceBusSenderClientBuilder}.
 */
public ServiceBusSenderClientBuilder sender() {
    return new ServiceBusSenderClientBuilder();
}

/**
 * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
 */
public ServiceBusReceiverClientBuilder receiver() {
    return new ServiceBusReceiverClientBuilder();
}

/**
 * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service
 * Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
 */
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
    return new ServiceBusSessionReceiverClientBuilder();
}

/**
 * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure a {@link ServiceBusProcessorClient}
 * instance.
 *
 * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
 */
public ServiceBusProcessorClientBuilder processor() {
    return new ServiceBusProcessorClientBuilder();
}

/**
 * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
 * instance that processes sessions.
 *
 * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
 */
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
    return new ServiceBusSessionProcessorClientBuilder();
}
/**
 * Called when a child client is closed. Disposes of the shared connection if there are no more clients.
 * Runs under {@code connectionLock} so the count check and the dispose are atomic with respect to
 * {@code getOrCreateConnectionProcessor}.
 */
void onClientClose() {
    synchronized (connectionLock) {
        final int numberOfOpenClients = openClients.decrementAndGet();
        LOGGER.atInfo()
            .addKeyValue("numberOfOpenClients", numberOfOpenClients)
            .log("Closing a dependent client.");

        // Other clients still share the connection; leave it open.
        if (numberOfOpenClients > 0) {
            return;
        }

        // A negative count means close was invoked more times than clients were created.
        if (numberOfOpenClients < 0) {
            LOGGER.atWarning()
                .addKeyValue("numberOfOpenClients", numberOfOpenClients)
                .log("There should not be less than 0 clients.");
        }

        LOGGER.info("No more open clients, closing shared connection.");

        if (sharedConnection != null) {
            sharedConnection.dispose();
            sharedConnection = null;
        } else {
            LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
        }
    }
}
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
if (retryOptions == null) {
retryOptions = DEFAULT_RETRY;
}
if (scheduler == null) {
scheduler = Schedulers.elastic();
}
synchronized (connectionLock) {
if (sharedConnection == null) {
final ConnectionOptions connectionOptions = getConnectionOptions();
final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
final String connectionId = StringUtil.getRandomString("MF");
final ReactorProvider provider = new ReactorProvider();
final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
connectionOptions.getAuthorizationScope());
return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
crossEntityTransactions);
}).repeat();
sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
}
}
final int numberOfOpenClients = openClients.incrementAndGet();
LOGGER.info("
return sharedConnection;
}
/**
 * Assembles the {@link ConnectionOptions} used to open the shared AMQP connection, validating that
 * credentials are present and that any proxy configuration is compatible with the transport.
 */
private ConnectionOptions getConnectionOptions() {
    configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;

    if (credentials == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
            + "They can be set using: connectionString(String), connectionString(String, String), "
            + "or credentials(String, String, TokenCredential)"
        ));
    }

    // A proxy can only be traversed over web sockets; plain AMQP over TCP cannot tunnel through it.
    if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
        && transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "Cannot use a proxy when TransportType is not AMQP."));
    }

    if (proxyOptions == null) {
        proxyOptions = getDefaultProxyConfiguration(configuration);
    }

    // Shared-key credentials authenticate via SAS tokens; every other TokenCredential uses AAD JWTs.
    final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
        ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
        : CbsAuthorizationType.JSON_WEB_TOKEN;
    final SslDomain.VerifyMode verificationMode = verifyMode != null
        ? verifyMode
        : SslDomain.VerifyMode.VERIFY_PEER_NAME;
    final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();

    // Product name/version are read from the SDK properties file and stamped onto the connection
    // for diagnostics.
    final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
    final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
    final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);

    if (customEndpointAddress == null) {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion);
    } else {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
            customEndpointAddress.getPort());
    }
}
/**
 * Derives proxy settings from the environment when none were set explicitly: reads HTTP_PROXY from
 * the configuration store, falling back to system defaults when it is absent.
 */
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
    // Preserve any authentication type already chosen on an explicit ProxyOptions.
    final ProxyAuthenticationType authentication = proxyOptions == null
        ? ProxyAuthenticationType.NONE
        : proxyOptions.getAuthentication();

    final String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
    if (CoreUtils.isNullOrEmpty(proxyAddress)) {
        return ProxyOptions.SYSTEM_DEFAULTS;
    }

    return getProxyOptions(authentication, proxyAddress, configuration,
        Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
}
/**
 * Parses an HTTP_PROXY value of the form {@code host:port} into {@link ProxyOptions}, or falls back
 * to azure-core's system-proxy resolution when {@code java.net.useSystemProxies} is enabled.
 */
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
    Configuration configuration, boolean useSystemProxies) {
    // The pattern is matched against the trimmed address, so split the trimmed value as well;
    // splitting the raw string could leave surrounding whitespace in the port segment and make
    // Integer.parseInt throw.
    final String trimmedAddress = proxyAddress.trim();
    if (HOST_PORT_PATTERN.matcher(trimmedAddress).find()) {
        final String[] hostPort = trimmedAddress.split(":");
        final String host = hostPort[0];
        final int port = Integer.parseInt(hostPort[1]);
        final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
        final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
        return new ProxyOptions(authentication, proxy, username, password);
    } else if (useSystemProxies) {
        // java.net.useSystemProxies is opt-in; honor it by converting azure-core's proxy options.
        com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
            .fromConfiguration(configuration);
        return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
            coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
    } else {
        LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
            + "set or was false.");
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
}
/** Returns {@code true} when the given string is {@code null} or contains no characters. */
private static boolean isNullOrEmpty(String item) {
    return item == null || item.length() == 0;
}
/**
 * Validates that exactly one target entity is resolvable from the queue name, topic name, and the
 * entity embedded in the connection string, returning the corresponding entity type.
 *
 * @throws IllegalStateException when no entity is set, both queue and topic are set, or an explicit
 *     name conflicts with the connection string's EntityPath.
 */
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
    String topicName, String queueName) {
    final boolean hasTopicName = !isNullOrEmpty(topicName);
    final boolean hasQueueName = !isNullOrEmpty(queueName);
    final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);

    if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
            "Cannot build client without setting either a queueName or topicName."));
    }
    if (hasQueueName && hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
    }

    if (hasQueueName) {
        // An explicit queue name must agree with any entity pinned by the connection string.
        if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "queueName (%s) is different than the connectionString's EntityPath (%s).",
                queueName, connectionStringEntityName)));
        }
        return MessagingEntityType.QUEUE;
    }

    if (hasTopicName) {
        // Likewise for an explicit topic name.
        if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) is different than the connectionString's EntityPath (%s).",
                topicName, connectionStringEntityName)));
        }
        return MessagingEntityType.SUBSCRIPTION;
    }

    // Only the connection string named an entity; its kind is not known from the name alone.
    return MessagingEntityType.UNKNOWN;
}
/**
 * Maps the validated entity type plus names to the AMQP entity path, optionally appending the
 * dead-letter or transfer-dead-letter sub-queue suffix.
 *
 * @throws IllegalStateException if a subscription is requested without a subscription name.
 * @throws IllegalArgumentException if the entity type or sub-queue value is not recognized.
 */
private static String getEntityPath(MessagingEntityType entityType, String queueName,
    String topicName, String subscriptionName, SubQueue subQueue) {
    String entityPath;
    switch (entityType) {
        case QUEUE:
            entityPath = queueName;
            break;
        case SUBSCRIPTION:
            if (isNullOrEmpty(subscriptionName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "topicName (%s) must have a subscriptionName associated with it.", topicName)));
            }

            entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
                subscriptionName);
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                new IllegalArgumentException("Unknown entity type: " + entityType));
    }

    if (subQueue == null) {
        return entityPath;
    }

    // Sub-queues are addressed by suffixing the main entity's path.
    switch (subQueue) {
        case NONE:
            break;
        case TRANSFER_DEAD_LETTER_QUEUE:
            entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
            break;
        case DEAD_LETTER_QUEUE:
            entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX;
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: "
                + subQueue));
    }

    return entityPath;
}
/**
 * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
 * to Service Bus.
 *
 * @see ServiceBusSenderAsyncClient
 * @see ServiceBusSenderClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
    // Exactly one of queueName/topicName may be set; validateEntityPaths enforces this at build time.
    private String queueName;
    private String topicName;

    private ServiceBusSenderClientBuilder() {
    }

    /**
     * Sets the name of the Service Bus queue to publish messages to.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the name of the Service Bus topic to publish messages to.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
     * ServiceBusMessage} to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if neither the queue name nor the topic name is set, if both are set,
     *     or if the explicit name disagrees with the entity embedded in the connection string.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderAsyncClient buildAsyncClient() {
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);

        // validateEntityPaths guarantees at most one explicit name is set, so this switch resolves
        // the single entity the sender targets.
        final String entityName;
        switch (entityType) {
            case QUEUE:
                entityName = queueName;
                break;
            case SUBSCRIPTION:
                entityName = topicName;
                break;
            case UNKNOWN:
                // Only the connection string named an entity.
                entityName = connectionStringEntityName;
                break;
            default:
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Unknown entity type: " + entityType));
        }

        return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
    }

    /**
     * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
     * to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if neither the queue name nor the topic name is set, if both are set,
     *     or if the explicit name disagrees with the entity embedded in the connection string.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderClient buildClient() {
        return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
    }
}
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
* entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link
* and {@link
* next session to process.
*
* <p>
* By default, the processor:
* <ul>
* <li>Automatically settles messages. Disabled via {@link
* <li>Processes 1 session concurrently. Configured via {@link
* <li>Invokes 1 instance of {@link
* {@link
* </ul>
*
* <p><strong>Instantiate a session-enabled processor client</strong></p>
* <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient
* <pre>
* Consumer<ServiceBusReceivedMessageContext> onMessage = context -> &
* ServiceBusReceivedMessage message = context.getMessage&
* System.out.printf&
* message.getSessionId&
* &
*
* Consumer<ServiceBusErrorContext> onError = context -> &
* System.out.printf&
* context.getFullyQualifiedNamespace&
*
* if &
* ServiceBusException exception = &
* System.out.printf&
* exception.getReason&
* &
* System.out.printf&
* &
* &
*
* &
*
* ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder&
* .connectionString&
* .sessionProcessor&
* .queueName&
* .maxConcurrentSessions&
* .processMessage&
* .processError&
* .buildProcessorClient&
*
* &
* sessionProcessor.start&
* </pre>
* <!-- end com.azure.messaging.servicebus.servicebusprocessorclient
*
* @see ServiceBusProcessorClient
*/
public final class ServiceBusSessionProcessorClientBuilder {
private final ServiceBusProcessorClientOptions processorClientOptions;
private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
private Consumer<ServiceBusReceivedMessageContext> processMessage;
private Consumer<ServiceBusErrorContext> processError;
private ServiceBusSessionProcessorClientBuilder() {
sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
processorClientOptions = new ServiceBusProcessorClientOptions()
.setMaxConcurrentCalls(1)
.setTracerProvider(tracerProvider);
sessionReceiverClientBuilder.maxConcurrentSessions(1);
}
/**
* Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration
* disables auto-renewal. For {@link ServiceBusReceiveMode
* auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration
* or {@code null} indicates that auto-renewal is disabled.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
}
sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
return this;
}
/**
* Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application starts the processor.
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
* Using a non-zero prefetch risks of losing messages even though it has better performance.
* @see <a href="https:
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
sessionReceiverClientBuilder.prefetchCount(prefetchCount);
return this;
}
/**
* Sets the name of the queue to create a processor for.
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
sessionReceiverClientBuilder.queueName(queueName);
return this;
}
/**
* Sets the receive mode for the processor.
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
sessionReceiverClientBuilder.receiveMode(receiveMode);
return this;
}
/**
* Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
* secondary sub-queue, called a dead-letter queue (DLQ).
*
* @param subQueue The type of the sub queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
* @see SubQueue
*/
public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
this.sessionReceiverClientBuilder.subQueue(subQueue);
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
sessionReceiverClientBuilder.subscriptionName(subscriptionName);
return this;
}
/**
* Sets the name of the topic. <b>{@link
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
sessionReceiverClientBuilder.topicName(topicName);
return this;
}
/**
* The message processing callback for the processor that will be executed when a message is received.
* @param processMessage The message processing consumer that will be executed when a message is received.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder processMessage(
Consumer<ServiceBusReceivedMessageContext> processMessage) {
this.processMessage = processMessage;
return this;
}
/**
* The error handler for the processor which will be invoked in the event of an error while receiving messages.
* @param processError The error handler which will be executed when an error occurs.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object
*/
public ServiceBusSessionProcessorClientBuilder processError(
Consumer<ServiceBusErrorContext> processError) {
this.processError = processError;
return this;
}
/**
* Max concurrent messages that this processor should process.
*
* @param maxConcurrentCalls max concurrent messages that this processor should process.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
if (maxConcurrentCalls < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
return this;
}
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
         * message is completed; if an error happens while the message is processed, it is abandoned.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
            sessionReceiverClientBuilder.disableAutoComplete();
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }
/**
* Creates a <b>session-aware</b> Service Bus processor responsible for reading
* {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
* @throws NullPointerException if the {@link
* callbacks are not set.
*/
public ServiceBusProcessorClient buildProcessorClient() {
return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
sessionReceiverClientBuilder.subscriptionName,
Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
}
}
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from a <b>session aware</b> Service Bus entity.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusSessionReceiverClientBuilder {
private boolean enableAutoComplete = true;
private Integer maxConcurrentSessions = null;
private int prefetchCount = DEFAULT_PREFETCH_COUNT;
private String queueName;
private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
private String subscriptionName;
private String topicName;
private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
private SubQueue subQueue = SubQueue.NONE;
private ServiceBusSessionReceiverClientBuilder() {
}
/**
* Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
* {@link ServiceBusReceiverAsyncClient
* the message is processed, it is {@link ServiceBusReceiverAsyncClient
* abandoned}.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
this.enableAutoComplete = false;
return this;
}
/**
* Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration
* {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode
* mode, auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
* {@link Duration
*
* @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"maxConcurrentSessions cannot be less than 1."));
}
this.maxConcurrentSessions = maxConcurrentSessions;
return this;
}
/**
* Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application asks for one using {@link ServiceBusReceiverAsyncClient
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch
* off.
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException If {code prefetchCount} is negative.
*/
public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
validateAndThrow(prefetchCount);
this.prefetchCount = prefetchCount;
return this;
}
/**
* Sets the name of the queue to create a receiver for.
*
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
this.queueName = queueName;
return this;
}
/**
* Sets the receive mode for the receiver.
*
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
this.receiveMode = receiveMode;
return this;
}
/**
* Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
* secondary sub-queue, called a dead-letter queue (DLQ).
*
* @param subQueue The type of the sub queue.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @see
* @see SubQueue
*/
public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
this.subQueue = subQueue;
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
*
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @see
*/
public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
this.subscriptionName = subscriptionName;
return this;
}
/**
* Sets the name of the topic. <b>{@link
*
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @see
*/
public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
this.topicName = topicName;
return this;
}
/**
* Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
* ServiceBusMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
queueName);
final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
subQueue);
if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
enableAutoComplete = false;
}
if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
maxAutoLockRenewDuration = Duration.ZERO;
}
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
maxAutoLockRenewDuration, enableAutoComplete, null,
maxConcurrentSessions);
final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
}
/**
* Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
* ServiceBusMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
* subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
return buildAsyncClient(true);
}
/**
* Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
* ServiceBusMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
public ServiceBusSessionReceiverClient buildClient() {
final boolean isPrefetchDisabled = prefetchCount == 0;
return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
isPrefetchDisabled,
MessageUtils.getTotalTimeout(retryOptions));
}
private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
queueName);
final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
SubQueue.NONE);
if (!isAutoCompleteAllowed && enableAutoComplete) {
LOGGER.warning(
"'enableAutoComplete' is not supported in synchronous client except through callback receive.");
enableAutoComplete = false;
} else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
enableAutoComplete = false;
}
if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
maxAutoLockRenewDuration = Duration.ZERO;
}
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
ServiceBusClientBuilder.this::onClientClose);
}
}
    /**
     * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
     * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies
     * the message processing callback when a message is received or the error handler when an error is observed. To
     * create an instance, therefore, configure the two callbacks - {@link #processMessage(Consumer)} and
     * {@link #processError(Consumer)}. By default, a {@link ServiceBusProcessorClient} is configured
     * with auto-completion and auto-lock renewal capabilities.
     *
     * <p><strong>Sample code to instantiate a processor client</strong></p>
     * <pre>
     * Consumer&lt;ServiceBusReceivedMessageContext&gt; onMessage = context -&gt; {
     *     ServiceBusReceivedMessage message = context.getMessage();
     *     System.out.printf("Processing message. Sequence #: %s. Contents: %s%n",
     *         message.getSequenceNumber(), message.getBody());
     * };
     *
     * Consumer&lt;ServiceBusErrorContext&gt; onError = context -&gt; {
     *     System.out.printf("Error when receiving messages from namespace: '%s'. Entity: '%s'%n",
     *         context.getFullyQualifiedNamespace(), context.getEntityPath());
     *
     *     if (context.getException() instanceof ServiceBusException) {
     *         ServiceBusException exception = (ServiceBusException) context.getException();
     *         System.out.printf("Error source: %s, reason: %s%n", context.getErrorSource(),
     *             exception.getReason());
     *     } else {
     *         System.out.printf("Error occurred: %s%n", context.getException());
     *     }
     * };
     *
     * ServiceBusProcessorClient processor = new ServiceBusClientBuilder()
     *     .connectionString(connectionString)
     *     .processor()
     *     .queueName(queueName)
     *     .processMessage(onMessage)
     *     .processError(onError)
     *     .buildProcessorClient();
     *
     * processor.start();
     * </pre>
     *
     * @see ServiceBusProcessorClient
     */
    public final class ServiceBusProcessorClientBuilder {
        private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
        private final ServiceBusProcessorClientOptions processorClientOptions;
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;

        private ServiceBusProcessorClientBuilder() {
            serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
        }

        /**
         * Sets the prefetch count of the processor. This applies to both {@link ServiceBusReceiveMode#PEEK_LOCK}
         * and {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} modes.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval
         * when and before the application starts the processor.
         * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch
         * off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
            serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }

        /**
         * Sets the name of the queue to create a processor for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder queueName(String queueName) {
            serviceBusReceiverClientBuilder.queueName(queueName);
            return this;
        }

        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            serviceBusReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
            serviceBusReceiverClientBuilder.subQueue(subQueue);
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to.
         * <b>{@code topicName} must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
            serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }

        /**
         * Sets the name of the topic.
         * <b>{@code subscriptionName} must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusProcessorClientBuilder topicName(String topicName) {
            serviceBusReceiverClientBuilder.topicName(topicName);
            return this;
        }

        /**
         * The message processing callback for the processor which will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is received.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }

        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving
         * messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }

        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
         * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }

        /**
         * Max concurrent messages that this processor should process. By default, this is set to 1.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
         */
        public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }

        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
         * message is completed; if an error happens while the message is processed, it is abandoned.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder disableAutoComplete() {
            serviceBusReceiverClientBuilder.disableAutoComplete();
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }

        /**
         * Creates a Service Bus message processor responsible for reading {@link ServiceBusReceivedMessage
         * messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
         * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not
         * set.
         */
        public ServiceBusProcessorClient buildProcessorClient() {
            return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
                serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
                serviceBusReceiverClientBuilder.subscriptionName,
                Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
                Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
        }
    }
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from Service Bus.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
    @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
    public final class ServiceBusReceiverClientBuilder {
        // Auto-complete/auto-abandon of processed messages is on unless disableAutoComplete() is called.
        private boolean enableAutoComplete = true;
        private int prefetchCount = DEFAULT_PREFETCH_COUNT;
        private String queueName;
        private SubQueue subQueue;
        private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
        private String subscriptionName;
        private String topicName;
        private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

        private ServiceBusReceiverClientBuilder() {
        }

        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
         * message is completed; if an error happens while the message is processed, it is abandoned.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder disableAutoComplete() {
            this.enableAutoComplete = false;
            return this;
        }

        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
         * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }

        /**
         * Sets the prefetch count of the receiver. This applies to both {@link ServiceBusReceiveMode#PEEK_LOCK}
         * and {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} modes.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval
         * when and before the application asks for one. Setting a non-zero value will prefetch that number of
         * messages. Setting the value to zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code prefetchCount} is negative.
         */
        public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
            validateAndThrow(prefetchCount);
            this.prefetchCount = prefetchCount;
            return this;
        }

        /**
         * Sets the name of the queue to create a receiver for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }

        /**
         * Sets the receive mode for the receiver.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            this.receiveMode = receiveMode;
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to.
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
            this.subQueue = subQueue;
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to.
         * <b>{@code topicName} must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
            this.subscriptionName = subscriptionName;
            return this;
        }

        /**
         * Sets the name of the topic.
         * <b>{@code subscriptionName} must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusReceiverClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }

        /**
         * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage
         * messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
         * @throws IllegalArgumentException if the queue or topic name are not set, or if both are set.
         */
        public ServiceBusReceiverAsyncClient buildAsyncClient() {
            return buildAsyncClient(true);
        }

        /**
         * Creates a <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage
         * messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
         * @throws IllegalArgumentException if the queue or topic name are not set, or if both are set.
         */
        public ServiceBusReceiverClient buildClient() {
            final boolean isPrefetchDisabled = prefetchCount == 0;
            return new ServiceBusReceiverClient(buildAsyncClient(false),
                isPrefetchDisabled,
                MessageUtils.getTotalTimeout(retryOptions));
        }

        // Shared by both buildAsyncClient() and buildClient(). isAutoCompleteAllowed is false for the
        // synchronous client, where auto-complete is only supported through callback receive.
        ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                subQueue);

            if (!isAutoCompleteAllowed && enableAutoComplete) {
                LOGGER.warning(
                    "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
                enableAutoComplete = false;
            } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                // RECEIVE_AND_DELETE removes messages on receipt, so there is nothing to complete afterwards.
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }

            // Locks are not held in RECEIVE_AND_DELETE mode, so lock renewal is forced off.
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                maxAutoLockRenewDuration = Duration.ZERO;
            }

            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete);

            return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
                entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
        }
    }
private void validateAndThrow(int prefetchCount) {
if (prefetchCount < 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"prefetchCount (%s) cannot be less than 0.", prefetchCount)));
}
}
private void validateAndThrow(Duration maxLockRenewalDuration) {
if (maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"'maxLockRenewalDuration' cannot be negative."));
}
}
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
private static final AmqpRetryOptions DEFAULT_RETRY =
new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
private static final int DEFAULT_PREFETCH_COUNT = 0;
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
private ClientOptions clientOptions;
private Configuration configuration;
private ServiceBusConnectionProcessor sharedConnection;
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
* Keeps track of the open clients that were created from this builder when there is a shared connection.
*/
private final AtomicInteger openClients = new AtomicInteger();
/**
* Creates a new instance with the default transport {@link AmqpTransportType
*/
public ServiceBusClientBuilder() {
}
/**
* Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
* certain properties, as well as support the addition of custom header information. Refer to the {@link
* ClientOptions} documentation for more information.
*
* @param clientOptions to be set on the client.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the fully-qualified namespace for the Service Bus.
*
* @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return this;
}
private String getAndValidateFullyQualifiedNamespace() {
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return fullyQualifiedNamespace;
}
/**
* Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
* does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
* an intermediary. For example: {@literal https:
* <p>
* If no port is specified, the default port for the {@link
* used.
*
* @param customEndpointAddress The custom endpoint address.
* @return The updated {@link ServiceBusClientBuilder} object.
* @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
*/
/**
 * Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
 * When the connection string carries an EntityPath, that entity is recorded and later validated
 * against any queue/topic name set on a child builder.
 *
 * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws AzureException if a shared key credential cannot be created from the connection string.
 */
public ServiceBusClientBuilder connectionString(String connectionString) {
    final ConnectionStringProperties parsed = new ConnectionStringProperties(connectionString);
    final TokenCredential sharedKeyCredential;
    try {
        sharedKeyCredential = getTokenCredential(parsed);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
    }
    this.fullyQualifiedNamespace = parsed.getEndpoint().getHost();
    final String entityPath = parsed.getEntityPath();
    if (!CoreUtils.isNullOrEmpty(entityPath)) {
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Setting entity from connection string.");
        this.connectionStringEntityName = entityPath;
    }
    return credential(parsed.getEndpoint().getHost(), sharedKeyCredential);
}
/**
* Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
* scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
* one 'send-via' entity on server side as explained next.
* Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
* entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
* perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
* their first operation need to either be senders, or if they are receivers they need to be on the same entity as
* the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
* that the transaction is committed because it cannot route a receive operation through a different entity). For
* instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
* cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
* first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
*
* <p><strong>Avoid using non-transaction API on this client</strong></p>
* Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
* have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
* entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
*
* <p><strong>When not to enable this feature</strong></p>
* If your transaction is involved in one Service bus entity only. For example you are receiving from one
* queue/subscription and you want to settle your own messages which are part of one transaction.
*
 * @return The updated {@link ServiceBusClientBuilder} object.
*
* @see <a href="https:
*/
public ServiceBusClientBuilder enableCrossEntityTransactions() {
    // Routes all sends through the first entity operated on ('send-via') so one transaction can
    // span multiple Service Bus entities; see the javadoc above for ordering constraints.
    this.crossEntityTransactions = true;
    return this;
}
/**
 * Builds a shared-key credential from the parsed connection string, preferring a pre-signed
 * shared access signature when one is present.
 *
 * @param properties Parsed connection string properties.
 * @return A {@link ServiceBusSharedKeyCredential} built from the SAS or the key name/key pair.
 */
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
    if (properties.getSharedAccessSignature() != null) {
        return new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
    }
    return new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
        properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY);
}
/**
 * Sets the configuration store that is used during construction of the service client.
 *
 * If not specified, the default configuration store is used to configure Service Bus clients. Use
 * {@link Configuration#NONE} to bypass using configuration settings during construction.
 *
 * @param configuration The configuration store used to configure Service Bus clients.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
    // When left null, a clone of the global configuration is substituted at connection time.
    this.configuration = configuration;
    return this;
}
/**
 * Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
 * The azure-identity library has multiple {@link TokenCredential} implementations that can be used to
 * authenticate the access to the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after validation so a failed call does not leave the builder partially updated.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = credential;
    return this;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK
 * for Java identity and authentication documentation for more details on proper usage of the
 * {@link TokenCredential} type.
 *
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
    this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link AzureNamedKeyCredential#update(String, String)} allow you to update the name and key.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after validation so a failed call does not leave the builder partially updated.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link AzureNamedKeyCredential#update(String, String)} allow you to update the name and key.
 *
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Adapt the named key into the shared-key credential form the AMQP layer understands.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to the Service Bus access control with Shared Access Signatures documentation for details.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after validation so a failed call does not leave the builder partially updated.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to the Service Bus access control with Shared Access Signatures documentation for details.
 *
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Wrap the raw signature in the shared-key credential form the AMQP layer understands.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured,
 * {@link AmqpTransportType#AMQP_WEB_SOCKETS} must be used (enforced when the connection is created).
 *
 * @param proxyOptions The proxy configuration to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
    // When left null, defaults are derived from the configuration store at connection time.
    this.proxyOptions = proxyOptions;
    return this;
}
/**
 * Package-private method that sets the verify mode for this connection.
 *
 * @param verifyMode The SSL peer verification mode to apply.
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
    // When left null, VERIFY_PEER_NAME is applied when connection options are built.
    this.verifyMode = verifyMode;
    return this;
}
/**
 * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
 *
 * @param retryOptions The retry options to use. A {@code null} value causes the default retry options to be
 *     applied when the connection is created.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}
/**
 * Sets the scheduler to use.
 *
 * @param scheduler Scheduler to be used. When {@code null}, an elastic scheduler is created at connection
 *     time.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
    this.scheduler = scheduler;
    return this;
}
/**
 * Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is
 * {@link AmqpTransportType#AMQP}.
 *
 * @param transportType The transport type to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
    // Note: a proxy can only be used with AMQP_WEB_SOCKETS (enforced in getConnectionOptions()).
    this.transport = transportType;
    return this;
}
/**
 * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
 *
 * @return A new instance of {@link ServiceBusSenderClientBuilder}.
 */
public ServiceBusSenderClientBuilder sender() {
    // Each call returns a fresh child builder; all children share this builder's connection settings.
    return new ServiceBusSenderClientBuilder();
}
/**
 * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
 */
public ServiceBusReceiverClientBuilder receiver() {
    // Each call returns a fresh child builder; all children share this builder's connection settings.
    return new ServiceBusReceiverClientBuilder();
}
/**
 * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b>
 * Service Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
 */
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
    // Each call returns a fresh child builder; all children share this builder's connection settings.
    return new ServiceBusSessionReceiverClientBuilder();
}
/**
 * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure a {@link
 * ServiceBusProcessorClient} instance.
 *
 * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
 */
public ServiceBusProcessorClientBuilder processor() {
    // Each call returns a fresh child builder; all children share this builder's connection settings.
    return new ServiceBusProcessorClientBuilder();
}
/**
 * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
 * instance that processes sessions.
 *
 * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
 */
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
    // Each call returns a fresh child builder; all children share this builder's connection settings.
    return new ServiceBusSessionProcessorClientBuilder();
}
/**
 * Called when a child client is closed. Disposes of the shared connection if there are no more clients.
 */
void onClientClose() {
    synchronized (connectionLock) {
        final int remaining = openClients.decrementAndGet();
        LOGGER.atInfo()
            .addKeyValue("numberOfOpenClients", remaining)
            .log("Closing a dependent client.");
        // Other clients still depend on the shared connection; leave it running.
        if (remaining > 0) {
            return;
        }
        if (remaining < 0) {
            // Bookkeeping bug somewhere upstream; log and proceed with cleanup anyway.
            LOGGER.atWarning()
                .addKeyValue("numberOfOpenClients", remaining)
                .log("There should not be less than 0 clients.");
        }
        LOGGER.info("No more open clients, closing shared connection.");
        if (sharedConnection == null) {
            LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
        } else {
            sharedConnection.dispose();
            sharedConnection = null;
        }
    }
}
/**
 * Returns the shared AMQP connection processor, lazily creating it on first use and registering the
 * caller in the open-client count. Also applies defaults for retry options and the scheduler when
 * they were not configured.
 *
 * @param serializer Serializer used to encode and decode AMQP messages for the connection.
 * @return The shared {@link ServiceBusConnectionProcessor}.
 */
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
    if (retryOptions == null) {
        retryOptions = DEFAULT_RETRY;
    }
    if (scheduler == null) {
        scheduler = Schedulers.elastic();
    }
    synchronized (connectionLock) {
        if (sharedConnection == null) {
            final ConnectionOptions connectionOptions = getConnectionOptions();
            // repeat() allows the processor to resubscribe and obtain a new connection after the
            // previous one terminates.
            final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
                final String connectionId = StringUtil.getRandomString("MF");
                final ReactorProvider provider = new ReactorProvider();
                final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
                final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
                    connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
                    connectionOptions.getAuthorizationScope());
                return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
                    connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
                    crossEntityTransactions);
            }).repeat();
            sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
        }
    }
    final int numberOfOpenClients = openClients.incrementAndGet();
    // NOTE(review): this log statement appears truncated in this source extract; verify the full
    // message (presumably logging numberOfOpenClients) against the upstream file.
    LOGGER.info("
    return sharedConnection;
}
/**
 * Builds the {@link ConnectionOptions} from the state configured on this builder, applying defaults
 * for the configuration store, proxy, SSL verification mode, and client options when unset.
 *
 * @return Connection options used to create the underlying AMQP connection.
 * @throws IllegalArgumentException if no credentials were configured, or if a proxy address is set
 *     while the transport is not web sockets.
 */
private ConnectionOptions getConnectionOptions() {
    configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
    if (credentials == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
            + "They can be set using: connectionString(String), connectionString(String, String), "
            + "or credentials(String, String, TokenCredential)"
        ));
    }
    // AMQP-over-TCP cannot tunnel through an HTTP proxy; only the web-socket transport can.
    final boolean proxyAddressConfigured = proxyOptions != null && proxyOptions.isProxyAddressConfigured();
    if (proxyAddressConfigured && transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "Cannot use a proxy when TransportType is not AMQP."));
    }
    if (proxyOptions == null) {
        proxyOptions = getDefaultProxyConfiguration(configuration);
    }
    final CbsAuthorizationType authorizationType;
    if (credentials instanceof ServiceBusSharedKeyCredential) {
        authorizationType = CbsAuthorizationType.SHARED_ACCESS_SIGNATURE;
    } else {
        authorizationType = CbsAuthorizationType.JSON_WEB_TOKEN;
    }
    final SslDomain.VerifyMode sslVerifyMode = verifyMode == null
        ? SslDomain.VerifyMode.VERIFY_PEER_NAME
        : verifyMode;
    final ClientOptions resolvedClientOptions = clientOptions == null ? new ClientOptions() : clientOptions;
    final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
    final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
    final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
    if (customEndpointAddress != null) {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            resolvedClientOptions, sslVerifyMode, product, clientVersion, customEndpointAddress.getHost(),
            customEndpointAddress.getPort());
    }
    return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
        ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
        resolvedClientOptions, sslVerifyMode, product, clientVersion);
}
/**
 * Derives default proxy options from the configuration store when none were set explicitly.
 *
 * @param configuration Configuration store to read the HTTP proxy settings from.
 * @return Proxy options built from the configured HTTP proxy, or {@link ProxyOptions#SYSTEM_DEFAULTS}
 *     when no proxy address is configured.
 */
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
    // Preserve any authentication type the caller already chose; default to NONE.
    final ProxyAuthenticationType authentication = proxyOptions == null
        ? ProxyAuthenticationType.NONE
        : proxyOptions.getAuthentication();
    final String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
    if (CoreUtils.isNullOrEmpty(proxyAddress)) {
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
    return getProxyOptions(authentication, proxyAddress, configuration,
        Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
}
/**
 * Builds {@link ProxyOptions} from an explicitly configured proxy address, or from system proxy
 * settings when requested.
 *
 * @param authentication The proxy authentication type to apply.
 * @param proxyAddress The configured proxy address, expected in {@code host:port} form.
 * @param configuration Configuration store to read the proxy username and password from.
 * @param useSystemProxies Whether to fall back to system-wide proxy settings when the address does
 *     not match the {@code host:port} pattern.
 * @return The resolved proxy options, or {@link ProxyOptions#SYSTEM_DEFAULTS} when nothing usable
 *     is configured.
 */
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
    Configuration configuration, boolean useSystemProxies) {
    String host;
    int port;
    if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
        // Address matches the expected host:port shape; split it apart.
        // NOTE(review): split(":") assumes exactly one colon — an IPv6 literal would need
        // different parsing. Confirm HOST_PORT_PATTERN excludes such addresses.
        final String[] hostPort = proxyAddress.split(":");
        host = hostPort[0];
        port = Integer.parseInt(hostPort[1]);
        final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
        final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
        return new ProxyOptions(authentication, proxy, username, password);
    } else if (useSystemProxies) {
        // Translate azure-core's HTTP proxy representation into the AMQP ProxyOptions equivalent.
        com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
            .fromConfiguration(configuration);
        return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
            coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
    } else {
        LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
            + "set or was false.");
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
}
/**
 * Checks whether the given string is {@code null} or empty.
 *
 * @param item String to check.
 * @return {@code true} if {@code item} is null or has no characters; {@code false} otherwise.
 */
private static boolean isNullOrEmpty(String item) {
    // Delegate to azure-core's helper so null/empty semantics stay consistent with the other
    // checks in this builder.
    return CoreUtils.isNullOrEmpty(item);
}
/**
 * Determines the messaging entity type from the configured queue/topic names and the entity path
 * embedded in the connection string, validating that the combination is unambiguous.
 *
 * @param connectionStringEntityName Entity path from the connection string, possibly null/empty.
 * @param topicName Topic name set on the child builder, possibly null/empty.
 * @param queueName Queue name set on the child builder, possibly null/empty.
 * @return {@link MessagingEntityType#QUEUE} or {@link MessagingEntityType#SUBSCRIPTION} when an explicit
 *     name is set; {@link MessagingEntityType#UNKNOWN} when only the connection string names the entity.
 * @throws IllegalStateException if no entity is named anywhere, if both queue and topic are set, or if
 *     an explicit name conflicts with the connection string's entity path.
 */
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
    String topicName, String queueName) {
    final boolean hasTopicName = !isNullOrEmpty(topicName);
    final boolean hasQueueName = !isNullOrEmpty(queueName);
    final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);
    final MessagingEntityType entityType;
    if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
            "Cannot build client without setting either a queueName or topicName."));
    } else if (hasQueueName && hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
    } else if (hasQueueName) {
        // An explicit queue name must agree with the connection string's entity, when present.
        if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "queueName (%s) is different than the connectionString's EntityPath (%s).",
                queueName, connectionStringEntityName)));
        }
        entityType = MessagingEntityType.QUEUE;
    } else if (hasTopicName) {
        // Same agreement check for an explicit topic name.
        if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) is different than the connectionString's EntityPath (%s).",
                topicName, connectionStringEntityName)));
        }
        entityType = MessagingEntityType.SUBSCRIPTION;
    } else {
        // Only the connection string names the entity; its type cannot be determined here.
        entityType = MessagingEntityType.UNKNOWN;
    }
    return entityType;
}
/**
 * Resolves the AMQP entity path for the given entity type, appending the dead-letter sub-queue
 * suffix when one is requested.
 *
 * @param entityType Whether the entity is a queue or a topic subscription.
 * @param queueName Queue name; used when {@code entityType} is {@link MessagingEntityType#QUEUE}.
 * @param topicName Topic name; used when {@code entityType} is {@link MessagingEntityType#SUBSCRIPTION}.
 * @param subscriptionName Subscription name; required for subscriptions.
 * @param subQueue Optional sub-queue (dead-letter / transfer dead-letter) to address, possibly null.
 * @return The entity path to connect to.
 * @throws IllegalStateException if a subscription is requested without a subscription name.
 * @throws IllegalArgumentException if the entity type or sub-queue type is not supported.
 */
private static String getEntityPath(MessagingEntityType entityType, String queueName,
    String topicName, String subscriptionName, SubQueue subQueue) {
    String entityPath;
    switch (entityType) {
        case QUEUE:
            entityPath = queueName;
            break;
        case SUBSCRIPTION:
            if (isNullOrEmpty(subscriptionName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "topicName (%s) must have a subscriptionName associated with it.", topicName)));
            }
            // Subscriptions are addressed as "{topic}/subscriptions/{subscription}".
            entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
                subscriptionName);
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                new IllegalArgumentException("Unknown entity type: " + entityType));
    }
    if (subQueue == null) {
        return entityPath;
    }
    // Dead-letter sub-queues are addressed by appending a well-known suffix to the entity path.
    switch (subQueue) {
        case NONE:
            break;
        case TRANSFER_DEAD_LETTER_QUEUE:
            entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
            break;
        case DEAD_LETTER_QUEUE:
            entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX;
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: "
                + subQueue));
    }
    return entityPath;
}
/**
* Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
* to Service Bus.
*
* @see ServiceBusSenderAsyncClient
* @see ServiceBusSenderClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
    // Exactly one of queueName/topicName may be set; validateEntityPaths enforces this at build time.
    private String queueName;
    private String topicName;

    private ServiceBusSenderClientBuilder() {
    }

    /**
     * Sets the name of the Service Bus queue to publish messages to.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the name of the Service Bus topic to publish messages to.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
     * ServiceBusMessage} to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if neither the queue name nor the topic name is set, if both are set,
     *     or if the name set here differs from the connection string's entity path.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderAsyncClient buildAsyncClient() {
        // Obtaining the connection processor also registers this client in the shared open-client count.
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityName;
        switch (entityType) {
            case QUEUE:
                entityName = queueName;
                break;
            case SUBSCRIPTION:
                entityName = topicName;
                break;
            case UNKNOWN:
                // Neither name was set explicitly; fall back to the connection string's entity path.
                entityName = connectionStringEntityName;
                break;
            default:
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Unknown entity type: " + entityType));
        }
        return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
    }

    /**
     * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
     * to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if neither the queue name nor the topic name is set, if both are set,
     *     or if the name set here differs from the connection string's entity path.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderClient buildClient() {
        // The synchronous client wraps the async client with a total timeout derived from the retry options.
        return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
    }
}
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
* entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link
* and {@link
* next session to process.
*
* <p>
* By default, the processor:
* <ul>
* <li>Automatically settles messages. Disabled via {@link
* <li>Processes 1 session concurrently. Configured via {@link
* <li>Invokes 1 instance of {@link
* {@link
* </ul>
*
* <p><strong>Instantiate a session-enabled processor client</strong></p>
* <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient
* <pre>
* Consumer<ServiceBusReceivedMessageContext> onMessage = context -> &
* ServiceBusReceivedMessage message = context.getMessage&
* System.out.printf&
* message.getSessionId&
* &
*
* Consumer<ServiceBusErrorContext> onError = context -> &
* System.out.printf&
* context.getFullyQualifiedNamespace&
*
* if &
* ServiceBusException exception = &
* System.out.printf&
* exception.getReason&
* &
* System.out.printf&
* &
* &
*
* &
*
* ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder&
* .connectionString&
* .sessionProcessor&
* .queueName&
* .maxConcurrentSessions&
* .processMessage&
* .processError&
* .buildProcessorClient&
*
* &
* sessionProcessor.start&
* </pre>
* <!-- end com.azure.messaging.servicebus.servicebusprocessorclient
*
* @see ServiceBusProcessorClient
*/
public final class ServiceBusSessionProcessorClientBuilder {
private final ServiceBusProcessorClientOptions processorClientOptions;
private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
private Consumer<ServiceBusReceivedMessageContext> processMessage;
private Consumer<ServiceBusErrorContext> processError;
private ServiceBusSessionProcessorClientBuilder() {
sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
processorClientOptions = new ServiceBusProcessorClientOptions()
.setMaxConcurrentCalls(1)
.setTracerProvider(tracerProvider);
sessionReceiverClientBuilder.maxConcurrentSessions(1);
}
/**
* Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration
* disables auto-renewal. For {@link ServiceBusReceiveMode
* auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration
* or {@code null} indicates that auto-renewal is disabled.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
}
sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
return this;
}
/**
* Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application starts the processor.
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
* Using a non-zero prefetch risks of losing messages even though it has better performance.
* @see <a href="https:
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
sessionReceiverClientBuilder.prefetchCount(prefetchCount);
return this;
}
/**
* Sets the name of the queue to create a processor for.
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
sessionReceiverClientBuilder.queueName(queueName);
return this;
}
/**
* Sets the receive mode for the processor.
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
sessionReceiverClientBuilder.receiveMode(receiveMode);
return this;
}
/**
* Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
* secondary sub-queue, called a dead-letter queue (DLQ).
*
* @param subQueue The type of the sub queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
* @see SubQueue
*/
public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
this.sessionReceiverClientBuilder.subQueue(subQueue);
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
sessionReceiverClientBuilder.subscriptionName(subscriptionName);
return this;
}
/**
* Sets the name of the topic. <b>{@link
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
sessionReceiverClientBuilder.topicName(topicName);
return this;
}
/**
* The message processing callback for the processor that will be executed when a message is received.
* @param processMessage The message processing consumer that will be executed when a message is received.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder processMessage(
Consumer<ServiceBusReceivedMessageContext> processMessage) {
this.processMessage = processMessage;
return this;
}
/**
* The error handler for the processor which will be invoked in the event of an error while receiving messages.
* @param processError The error handler which will be executed when an error occurs.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object
*/
public ServiceBusSessionProcessorClientBuilder processError(
Consumer<ServiceBusErrorContext> processError) {
this.processError = processError;
return this;
}
/**
 * Max concurrent messages that this processor should process.
 *
 * @param maxConcurrentCalls max concurrent messages that this processor should process.
 *
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
 */
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
    if (maxConcurrentCalls >= 1) {
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }
    throw LOGGER.logExceptionAsError(
        new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
/**
 * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
 * {@link ServiceBusReceivedMessageContext#complete() completed}. If an error happens when
 * the message is processed, it is {@link ServiceBusReceivedMessageContext#abandon() abandoned}.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
    // Both the backing receiver builder and the processor options must agree that auto-complete is off.
    sessionReceiverClientBuilder.disableAutoComplete();
    processorClientOptions.setDisableAutoComplete(true);
    return this;
}
/**
 * Creates a <b>session-aware</b> Service Bus processor responsible for reading
 * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
 * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
 *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
 *     {@code connectionString} contains an {@code EntityPath} that does not match the entity configured on
 *     this builder.
 * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
 *     queueName()} or {@link #topicName(String) topicName()}.
 * @throws NullPointerException if the {@link #processMessage(Consumer)} or {@link #processError(Consumer)}
 *     callbacks are not set.
 */
public ServiceBusProcessorClient buildProcessorClient() {
    // Entity names are read from the backing receiver builder; the requireNonNull calls ensure
    // both callbacks were configured before the processor is constructed.
    return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
        sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
        sessionReceiverClientBuilder.subscriptionName,
        Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
        Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
}
}
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from a <b>session aware</b> Service Bus entity.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusSessionReceiverClientBuilder {
private boolean enableAutoComplete = true;
private Integer maxConcurrentSessions = null;
private int prefetchCount = DEFAULT_PREFETCH_COUNT;
private String queueName;
private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
private String subscriptionName;
private String topicName;
private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
private SubQueue subQueue = SubQueue.NONE;
// Private: instances are obtained through the enclosing ServiceBusClientBuilder.
private ServiceBusSessionReceiverClientBuilder() {
}
/**
 * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
 * {@link ServiceBusReceiverAsyncClient#complete(ServiceBusReceivedMessage) completed}. If an error happens when
 * the message is processed, it is {@link ServiceBusReceiverAsyncClient#abandon(ServiceBusReceivedMessage)
 * abandoned}.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 */
public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
    this.enableAutoComplete = false;
    return this;
}
/**
 * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration#ZERO} or
 * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE}
 * mode, auto-renewal is disabled.
 *
 * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
 *     {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
 *
 * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
 * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
 */
public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
    validateAndThrow(maxAutoLockRenewDuration);
    this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
    return this;
}
/**
 * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
 *
 * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
 */
ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
    if (maxConcurrentSessions < 1) {
        // Quote the parameter name for consistency with the other builder validation messages
        // (e.g. "'maxConcurrentCalls' cannot be less than 1").
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'maxConcurrentSessions' cannot be less than 1."));
    }
    this.maxConcurrentSessions = maxConcurrentSessions;
    return this;
}
/**
 * Sets the prefetch count of the receiver.
 *
 * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
 * and before the application asks for one using {@code receiveMessages()}.
 * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch
 * off.
 *
 * @param prefetchCount The prefetch count.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 * @throws IllegalArgumentException If {@code prefetchCount} is negative.
 */
public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
    validateAndThrow(prefetchCount);
    this.prefetchCount = prefetchCount;
    return this;
}
/**
 * Sets the name of the queue to create a receiver for.
 * Queue and topic are mutually exclusive; this is validated when the client is built.
 *
 * @param queueName Name of the queue.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 */
public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
    this.queueName = queueName;
    return this;
}
/**
 * Sets the receive mode for the receiver. Defaults to {@link ServiceBusReceiveMode#PEEK_LOCK}.
 *
 * @param receiveMode Mode for receiving messages.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 */
public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
    this.receiveMode = receiveMode;
    return this;
}
/**
 * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
 * secondary sub-queue, called a dead-letter queue (DLQ).
 *
 * @param subQueue The type of the sub queue.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 * @see #queueName(String)
 * @see SubQueue
 */
public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
    this.subQueue = subQueue;
    return this;
}
/**
 * Sets the name of the subscription in the topic to listen to.
 * <b>{@link #topicName(String)} should also be set.</b>
 *
 * @param subscriptionName Name of the subscription.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 * @see #topicName(String)
 */
public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
    this.subscriptionName = subscriptionName;
    return this;
}
/**
 * Sets the name of the topic.
 * <b>{@link #subscriptionName(String)} should also be set.</b>
 *
 * @param topicName Name of the topic.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 * @see #subscriptionName(String)
 */
public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
    this.topicName = topicName;
    return this;
}
/**
 * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
 * ServiceBusMessage messages} from a specific queue or subscription. Package-private: used to back the
 * session processor client.
 *
 * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
 * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
 *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
 *     {@code connectionString} contains an {@code EntityPath} that does not match the entity configured on
 *     this builder.
 * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
 *     queueName()} or {@link #topicName(String) topicName()}.
 */
ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
    final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
        queueName);
    final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
        subQueue);
    // In RECEIVE_AND_DELETE mode messages are settled on receipt, so auto-complete is meaningless.
    if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
        // Fixed log typo: "not needed in for" -> "not needed for".
        LOGGER.warning("'enableAutoComplete' is not needed for RECEIVE_AND_DELETE mode.");
        enableAutoComplete = false;
    }
    // Likewise there is no message lock to renew in RECEIVE_AND_DELETE mode.
    if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
        maxAutoLockRenewDuration = Duration.ZERO;
    }
    final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
    final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
        maxAutoLockRenewDuration, enableAutoComplete, null,
        maxConcurrentSessions);
    final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
        connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
    return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
        entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
        tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
}
/**
 * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
 * ServiceBusMessage messages} from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
 *     subscription.
 * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
 *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
 *     {@code connectionString} contains an {@code EntityPath} that does not match the entity configured on
 *     this builder.
 * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
 *     queueName()} or {@link #topicName(String) topicName()}.
 */
public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
    // Auto-complete is allowed on the asynchronous path; the synchronous buildClient() passes false.
    return buildAsyncClient(true);
}
/**
 * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
 * ServiceBusMessage messages} from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusSessionReceiverClient} that receives messages from a queue or subscription.
 * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
 *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
 *     {@code connectionString} contains an {@code EntityPath} that does not match the entity configured on
 *     this builder.
 * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
 *     queueName()} or {@link #topicName(String) topicName()}.
 */
public ServiceBusSessionReceiverClient buildClient() {
    // Prefetch of zero means the synchronous client requests messages on demand.
    final boolean isPrefetchDisabled = prefetchCount == 0;
    return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
        isPrefetchDisabled,
        MessageUtils.getTotalTimeout(retryOptions));
}
/**
 * Builds the session receiver async client, normalizing the auto-complete and lock-renewal settings
 * for the requested receive mode.
 *
 * @param isAutoCompleteAllowed whether the calling client supports auto-complete
 *     ({@code false} for the synchronous client).
 * @return A new {@link ServiceBusSessionReceiverAsyncClient}.
 */
private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
    final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
        queueName);
    final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
        SubQueue.NONE);
    if (!isAutoCompleteAllowed && enableAutoComplete) {
        LOGGER.warning(
            "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
        enableAutoComplete = false;
    } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
        // Fixed log typo: "not needed in for" -> "not needed for". Messages are settled on receipt
        // in RECEIVE_AND_DELETE mode, so there is nothing to complete.
        LOGGER.warning("'enableAutoComplete' is not needed for RECEIVE_AND_DELETE mode.");
        enableAutoComplete = false;
    }
    if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
        // No message lock exists in this mode, so lock renewal is disabled.
        maxAutoLockRenewDuration = Duration.ZERO;
    }
    final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
    final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
        maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
    return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
        entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
        ServiceBusClientBuilder.this::onClientClose);
}
}
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
* {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies
* the message processing callback when a message is received or the error handle when an error is observed. To
 * create an instance, therefore, configure the two callbacks - {@link ServiceBusProcessorClientBuilder#processMessage(Consumer) processMessage}
 * and {@link ServiceBusProcessorClientBuilder#processError(Consumer) processError}. By default, the processor is created
 * with auto-completion and auto-lock renewal capabilities.
*
 * <p><strong>Sample code to instantiate a processor client</strong></p>
 * <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient.instantiation -->
 * <pre>
 * Consumer&lt;ServiceBusReceivedMessageContext&gt; onMessage = context -&gt; &#123;
 *     ServiceBusReceivedMessage message = context.getMessage&#40;&#41;;
 *     System.out.printf&#40;"Processing message. Sequence #: %s. Contents: %s%n",
 *         message.getSequenceNumber&#40;&#41;, message.getBody&#40;&#41;&#41;;
 * &#125;;
 *
 * Consumer&lt;ServiceBusErrorContext&gt; onError = context -&gt; &#123;
 *     System.out.printf&#40;"Error when receiving messages from namespace: '%s'. Entity: '%s'%n",
 *         context.getFullyQualifiedNamespace&#40;&#41;, context.getEntityPath&#40;&#41;&#41;;
 *
 *     if &#40;context.getException&#40;&#41; instanceof ServiceBusException&#41; &#123;
 *         ServiceBusException exception = &#40;ServiceBusException&#41; context.getException&#40;&#41;;
 *         System.out.printf&#40;"Error source: %s, reason: %s%n", context.getErrorSource&#40;&#41;,
 *             exception.getReason&#40;&#41;&#41;;
 *     &#125; else &#123;
 *         System.out.printf&#40;"Error occurred: %s%n", context.getException&#40;&#41;&#41;;
 *     &#125;
 * &#125;;
 *
 * &#47;&#47; Retrieve 'connectionString' and 'queueName' from your configuration.
 *
 * ServiceBusProcessorClient processor = new ServiceBusClientBuilder&#40;&#41;
 *     .connectionString&#40;connectionString&#41;
 *     .processor&#40;&#41;
 *     .queueName&#40;queueName&#41;
 *     .processMessage&#40;onMessage&#41;
 *     .processError&#40;onError&#41;
 *     .buildProcessorClient&#40;&#41;;
 *
 * &#47;&#47; Start the processor to begin receiving messages.
 * processor.start&#40;&#41;;
 * </pre>
 * <!-- end com.azure.messaging.servicebus.servicebusprocessorclient.instantiation -->
*
* @see ServiceBusProcessorClient
*/
public final class ServiceBusProcessorClientBuilder {
private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
private final ServiceBusProcessorClientOptions processorClientOptions;
private Consumer<ServiceBusReceivedMessageContext> processMessage;
private Consumer<ServiceBusErrorContext> processError;
// Private: instances are obtained through the enclosing ServiceBusClientBuilder.
private ServiceBusProcessorClientBuilder() {
    serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
    // Default to one concurrent call; callers raise it via maxConcurrentCalls(int).
    processorClientOptions = new ServiceBusProcessorClientOptions()
        .setMaxConcurrentCalls(1)
        .setTracerProvider(tracerProvider);
}
/**
 * Sets the prefetch count of the processor.
 *
 * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
 * and before the application starts the processor.
 * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
 *
 * @param prefetchCount The prefetch count.
 *
 * @return The modified {@link ServiceBusProcessorClientBuilder} object.
 */
public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
    // Delegates to the backing receiver builder, which also validates the value.
    serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
    return this;
}
/**
 * Sets the name of the queue to create a processor for.
 *
 * @param queueName Name of the queue.
 *
 * @return The modified {@link ServiceBusProcessorClientBuilder} object.
 */
public ServiceBusProcessorClientBuilder queueName(String queueName) {
    // Delegates to the backing receiver builder.
    serviceBusReceiverClientBuilder.queueName(queueName);
    return this;
}
/**
 * Sets the receive mode for the processor. Defaults to {@link ServiceBusReceiveMode#PEEK_LOCK}.
 *
 * @param receiveMode Mode for receiving messages.
 *
 * @return The modified {@link ServiceBusProcessorClientBuilder} object.
 */
public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
    // Delegates to the backing receiver builder.
    serviceBusReceiverClientBuilder.receiveMode(receiveMode);
    return this;
}
/**
 * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
 * secondary sub-queue, called a dead-letter queue (DLQ).
 *
 * @param subQueue The type of the sub queue.
 *
 * @return The modified {@link ServiceBusProcessorClientBuilder} object.
 * @see #queueName(String)
 * @see SubQueue
 */
public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
    // Delegates to the backing receiver builder.
    serviceBusReceiverClientBuilder.subQueue(subQueue);
    return this;
}
/**
 * Sets the name of the subscription in the topic to listen to.
 * <b>{@link #topicName(String)} should also be set.</b>
 *
 * @param subscriptionName Name of the subscription.
 *
 * @return The modified {@link ServiceBusProcessorClientBuilder} object.
 * @see #topicName(String)
 */
public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
    // Delegates to the backing receiver builder.
    serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
    return this;
}
/**
 * Sets the name of the topic.
 * <b>{@link #subscriptionName(String)} should also be set.</b>
 *
 * @param topicName Name of the topic.
 *
 * @return The modified {@link ServiceBusProcessorClientBuilder} object.
 * @see #subscriptionName(String)
 */
public ServiceBusProcessorClientBuilder topicName(String topicName) {
    // Delegates to the backing receiver builder.
    serviceBusReceiverClientBuilder.topicName(topicName);
    return this;
}
/**
 * The message processing callback for the processor which will be executed when a message is received.
 *
 * @param processMessage The message processing consumer that will be executed when a message is received.
 *
 * @return The updated {@link ServiceBusProcessorClientBuilder} object.
 */
public ServiceBusProcessorClientBuilder processMessage(
    Consumer<ServiceBusReceivedMessageContext> processMessage) {
    this.processMessage = processMessage;
    return this;
}
/**
 * The error handler for the processor which will be invoked in the event of an error while receiving messages.
 *
 * @param processError The error handler which will be executed when an error occurs.
 *
 * @return The updated {@link ServiceBusProcessorClientBuilder} object.
 */
public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
    this.processError = processError;
    return this;
}
/**
 * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
 * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode,
 * auto-renewal is disabled.
 *
 * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration#ZERO}
 *     or {@code null} indicates that auto-renewal is disabled.
 *
 * @return The updated {@link ServiceBusProcessorClientBuilder} object.
 * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
 */
public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
    validateAndThrow(maxAutoLockRenewDuration);
    serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
    return this;
}
/**
 * Max concurrent messages that this processor should process. By default, this is set to 1.
 *
 * @param maxConcurrentCalls max concurrent messages that this processor should process.
 * @return The updated {@link ServiceBusProcessorClientBuilder} object.
 * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
 */
public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
    if (maxConcurrentCalls >= 1) {
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }
    throw LOGGER.logExceptionAsError(
        new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
/**
 * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
 * {@link ServiceBusReceivedMessageContext#complete() completed}. If an error happens when
 * the message is processed, it is {@link ServiceBusReceivedMessageContext#abandon() abandoned}.
 *
 * @return The modified {@link ServiceBusProcessorClientBuilder} object.
 */
public ServiceBusProcessorClientBuilder disableAutoComplete() {
    // Both the backing receiver builder and the processor options must agree that auto-complete is off.
    serviceBusReceiverClientBuilder.disableAutoComplete();
    processorClientOptions.setDisableAutoComplete(true);
    return this;
}
/**
 * Creates Service Bus message processor responsible for reading {@link ServiceBusReceivedMessage
 * messages} from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
 * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
 *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
 *     {@code connectionString} contains an {@code EntityPath} that does not match the entity configured on
 *     this builder.
 * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
 *     queueName()} or {@link #topicName(String) topicName()}.
 * @throws NullPointerException if the {@link #processMessage(Consumer)} or {@link #processError(Consumer)}
 *     callbacks are not set.
 */
public ServiceBusProcessorClient buildProcessorClient() {
    // Entity names are read from the backing receiver builder; the requireNonNull calls ensure
    // both callbacks were configured before the processor is constructed.
    return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
        serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
        serviceBusReceiverClientBuilder.subscriptionName,
        Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
        Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
}
}
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from Service Bus.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusReceiverClientBuilder {
private boolean enableAutoComplete = true;
private int prefetchCount = DEFAULT_PREFETCH_COUNT;
private String queueName;
private SubQueue subQueue;
private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
private String subscriptionName;
private String topicName;
private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
// Private: instances are obtained through the enclosing ServiceBusClientBuilder.
private ServiceBusReceiverClientBuilder() {
}
/**
 * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
 * {@link ServiceBusReceiverAsyncClient#complete(ServiceBusReceivedMessage) completed}. If an error happens when
 * the message is processed, it is {@link ServiceBusReceiverAsyncClient#abandon(ServiceBusReceivedMessage)
 * abandoned}.
 *
 * @return The modified {@link ServiceBusReceiverClientBuilder} object.
 */
public ServiceBusReceiverClientBuilder disableAutoComplete() {
    this.enableAutoComplete = false;
    return this;
}
/**
 * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
 * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode,
 * auto-renewal is disabled.
 *
 * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration#ZERO}
 *     or {@code null} indicates that auto-renewal is disabled.
 *
 * @return The updated {@link ServiceBusReceiverClientBuilder} object.
 * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
 */
public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
    validateAndThrow(maxAutoLockRenewDuration);
    this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
    return this;
}
/**
 * Sets the prefetch count of the receiver.
 *
 * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
 * and before the application asks for one using {@code receiveMessages()}.
 * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch
 * off.
 *
 * @param prefetchCount The prefetch count.
 *
 * @return The modified {@link ServiceBusReceiverClientBuilder} object.
 * @throws IllegalArgumentException If {@code prefetchCount} is negative.
 */
public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
    validateAndThrow(prefetchCount);
    this.prefetchCount = prefetchCount;
    return this;
}
/**
 * Sets the name of the queue to create a receiver for.
 * Queue and topic are mutually exclusive; this is validated when the client is built.
 *
 * @param queueName Name of the queue.
 *
 * @return The modified {@link ServiceBusReceiverClientBuilder} object.
 */
public ServiceBusReceiverClientBuilder queueName(String queueName) {
    this.queueName = queueName;
    return this;
}
/**
 * Sets the receive mode for the receiver. Defaults to {@link ServiceBusReceiveMode#PEEK_LOCK}.
 *
 * @param receiveMode Mode for receiving messages.
 *
 * @return The modified {@link ServiceBusReceiverClientBuilder} object.
 */
public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
    this.receiveMode = receiveMode;
    return this;
}
/**
 * Sets the type of the {@link SubQueue} to connect to.
 *
 * @param subQueue The type of the sub queue.
 *
 * @return The modified {@link ServiceBusReceiverClientBuilder} object.
 * @see SubQueue
 */
public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
    this.subQueue = subQueue;
    return this;
}
/**
 * Sets the name of the subscription in the topic to listen to.
 * <b>{@link #topicName(String)} should also be set.</b>
 *
 * @param subscriptionName Name of the subscription.
 *
 * @return The modified {@link ServiceBusReceiverClientBuilder} object.
 * @see #topicName(String)
 */
public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
    this.subscriptionName = subscriptionName;
    return this;
}
/**
 * Sets the name of the topic.
 * <b>{@link #subscriptionName(String)} should also be set.</b>
 *
 * @param topicName Name of the topic.
 *
 * @return The modified {@link ServiceBusReceiverClientBuilder} object.
 * @see #subscriptionName(String)
 */
public ServiceBusReceiverClientBuilder topicName(String topicName) {
    this.topicName = topicName;
    return this;
}
/**
 * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage
 * messages} from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
 * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
 *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
 *     {@code connectionString} contains an {@code EntityPath} that does not match the entity configured on
 *     this builder.
 * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
 *     queueName()} or {@link #topicName(String) topicName()}.
 */
public ServiceBusReceiverAsyncClient buildAsyncClient() {
    // Auto-complete is allowed on the asynchronous path; the synchronous buildClient() passes false.
    return buildAsyncClient(true);
}
/**
 * Creates <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage messages}
 * from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
 * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
 *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
 *     {@code connectionString} contains an {@code EntityPath} that does not match the entity configured on
 *     this builder.
 * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
 *     queueName()} or {@link #topicName(String) topicName()}.
 */
public ServiceBusReceiverClient buildClient() {
    // Prefetch of zero means the synchronous client requests messages on demand.
    final boolean isPrefetchDisabled = prefetchCount == 0;
    return new ServiceBusReceiverClient(buildAsyncClient(false),
        isPrefetchDisabled,
        MessageUtils.getTotalTimeout(retryOptions));
}
/**
 * Builds the receiver async client, normalizing the auto-complete and lock-renewal settings
 * for the requested receive mode.
 *
 * @param isAutoCompleteAllowed whether the calling client supports auto-complete
 *     ({@code false} for the synchronous client).
 * @return A new {@link ServiceBusReceiverAsyncClient}.
 */
ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
    final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
        queueName);
    final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
        subQueue);
    if (!isAutoCompleteAllowed && enableAutoComplete) {
        LOGGER.warning(
            "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
        enableAutoComplete = false;
    } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
        // Fixed log typo: "not needed in for" -> "not needed for". Messages are settled on receipt
        // in RECEIVE_AND_DELETE mode, so there is nothing to complete.
        LOGGER.warning("'enableAutoComplete' is not needed for RECEIVE_AND_DELETE mode.");
        enableAutoComplete = false;
    }
    if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
        // No message lock exists in this mode, so lock renewal is disabled.
        maxAutoLockRenewDuration = Duration.ZERO;
    }
    final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
    final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
        maxAutoLockRenewDuration, enableAutoComplete);
    return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
        entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
        tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
}
}
/**
 * Validates a prefetch count, throwing when it is negative.
 *
 * @param prefetchCount The prefetch count to validate.
 * @throws IllegalArgumentException if {@code prefetchCount} is negative.
 */
private void validateAndThrow(int prefetchCount) {
    if (prefetchCount < 0) {
        // Use the %d conversion for the int argument (was %s); the rendered message is unchanged.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
            "prefetchCount (%d) cannot be less than 0.", prefetchCount)));
    }
}
/**
 * Validates a lock renewal duration, throwing when it is negative. {@code null} is accepted
 * (it disables auto-renewal).
 *
 * @param maxLockRenewalDuration The duration to validate; may be {@code null}.
 * @throws IllegalArgumentException if {@code maxLockRenewalDuration} is negative.
 */
private void validateAndThrow(Duration maxLockRenewalDuration) {
    final boolean isNegative = maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative();
    if (isNegative) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    }
}
} |
Sure, I'll check the other code in the file and follow the same convention. | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(customEndpointAddress + " : is not a valid URL,", e));
}
return this;
} | new IllegalArgumentException(customEndpointAddress + " : is not a valid URL,", e)); | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
}
return this;
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
private static final AmqpRetryOptions DEFAULT_RETRY =
new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
private static final int DEFAULT_PREFETCH_COUNT = 0;
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
private ClientOptions clientOptions;
private Configuration configuration;
private ServiceBusConnectionProcessor sharedConnection;
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
* Keeps track of the open clients that were created from this builder when there is a shared connection.
*/
private final AtomicInteger openClients = new AtomicInteger();
/**
 * Creates a new instance with the default transport {@link AmqpTransportType#AMQP}.
 */
public ServiceBusClientBuilder() {
}
/**
 * Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
 * certain properties, as well as support the addition of custom header information. Refer to the {@link
 * ClientOptions} documentation for more information.
 *
 * @param clientOptions to be set on the client.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
    this.clientOptions = clientOptions;
    return this;
}
/**
 * Sets the fully-qualified namespace for the Service Bus.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
    // Validate before mutating the field so a failed call does not leave the builder
    // holding an invalid (empty) namespace. The original assigned first, then threw.
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    return this;
}
/**
 * Returns the configured fully-qualified namespace, throwing if it was never set (or was set empty).
 *
 * @return The non-empty fully-qualified namespace.
 */
private String getAndValidateFullyQualifiedNamespace() {
    if (!CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        return fullyQualifiedNamespace;
    }
    throw LOGGER.logExceptionAsError(
        new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
/**
 * Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
 * does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
 * an intermediary. For example: {@literal https://my.custom.endpoint.com:55300}.
 * <p>
 * If no port is specified, the default port for the {@link #transportType(AmqpTransportType) transport type} is
 * used.
 *
 * @param customEndpointAddress The custom endpoint address.
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
 */
/**
 * Sets the connection string for a Service Bus namespace or a specific Service Bus resource. When the
 * connection string carries an {@code EntityPath}, that entity name is remembered and later validated
 * against any queue/topic name configured on the child builders.
 *
 * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder connectionString(String connectionString) {
    final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
    final TokenCredential tokenCredential;
    try {
        tokenCredential = getTokenCredential(properties);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
    }
    final String host = properties.getEndpoint().getHost();
    this.fullyQualifiedNamespace = host;
    final String entityPath = properties.getEntityPath();
    if (!CoreUtils.isNullOrEmpty(entityPath)) {
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Setting entity from connection string.");
        this.connectionStringEntityName = entityPath;
    }
    return credential(host, tokenCredential);
}
/**
 * Enables cross-entity transactions on the connection to Service Bus. Use this feature only when your
 * transaction scope spans across different Service Bus entities. It is achieved by routing all messages
 * through one 'send-via' entity on the server side: once clients are created for multiple entities, the
 * first entity that an operation occurs on becomes the entity through which all subsequent sends are
 * routed. Subsequent entities that perform their first operation must either be senders, or — if they are
 * receivers — be on the same entity as the one through which sends are routed (otherwise the service could
 * not guarantee the transaction commits, since it cannot route a receive through a different entity). For
 * instance, with SenderA (entity A) and ReceiverB (entity B) created from a client with cross-entity
 * transactions enabled, you must receive first with ReceiverB; sending to A and then receiving from B
 * throws an exception.
 *
 * <p><strong>Avoid using non-transaction API on this client</strong></p>
 * The connection is set up specifically for this feature: the first receiver or sender used initializes the
 * 'send-via' queue, and all messages then flow through it, so this client is not suitable for
 * non-transaction calls.
 *
 * <p><strong>When not to enable this feature</strong></p>
 * When the transaction involves only one Service Bus entity — e.g. receiving from one queue/subscription
 * and settling your own messages within one transaction.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder enableCrossEntityTransactions() {
    this.crossEntityTransactions = true;
    return this;
}
/**
 * Builds a shared-key credential from the connection string properties: a SAS-based credential when a
 * shared access signature is present, otherwise a key-name/key pair with the default token validity.
 *
 * @param properties Parsed connection string.
 * @return The credential derived from {@code properties}.
 */
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
    if (properties.getSharedAccessSignature() != null) {
        return new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
    }
    return new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
        properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY);
}
/**
 * Sets the configuration store that is used during construction of the service client.
 *
 * If not specified, the default (global) configuration store is used to configure Service Bus clients.
 *
 * @param configuration The configuration store used to configure Service Bus clients.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}
/**
 * Sets the credential by using a {@link TokenCredential} for the Service Bus resource. The azure-identity
 * library has multiple {@link TokenCredential} implementations that can be used to authenticate access to
 * the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    return this;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK
 * for Java identity and authentication documentation for more details on proper usage of the
 * {@link TokenCredential} type.
 *
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
    this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the Azure portal or Azure CLI.
 * On the portal, "Shared Access policies" has a 'policy' with its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on the portal and the 'key'
 * attribute can be either 'Primary Key' or 'Secondary Key'. {@link AzureNamedKeyCredential} allows you to
 * rotate the name and key without rebuilding the client.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    final String sharedAccessPolicy = credential.getAzureNamedKey().getName();
    final String sharedAccessKey = credential.getAzureNamedKey().getKey();
    this.credentials = new ServiceBusSharedKeyCredential(sharedAccessPolicy, sharedAccessKey,
        ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the Azure portal or Azure CLI.
 * On the portal, "Shared Access policies" has a 'policy' with its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on the portal and the 'key'
 * attribute can be either 'Primary Key' or 'Secondary Key'. {@link AzureNamedKeyCredential} allows you to
 * rotate the name and key without rebuilding the client.
 *
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    final String sharedAccessPolicy = credential.getAzureNamedKey().getName();
    final String sharedAccessKey = credential.getAzureNamedKey().getKey();
    this.credentials = new ServiceBusSharedKeyCredential(sharedAccessPolicy, sharedAccessKey,
        ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with a Shared Access Signature for the Service Bus resource.
 * Refer to the Service Bus documentation on access control with Shared Access Signatures.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the credential with a Shared Access Signature for the Service Bus resource.
 * Refer to the Service Bus documentation on access control with Shared Access Signatures.
 *
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the proxy configuration to use. When a proxy is configured, {@link AmqpTransportType#AMQP_WEB_SOCKETS}
 * must be used for the transport type (enforced when the connection options are built).
 *
 * @param proxyOptions The proxy configuration to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
    this.proxyOptions = proxyOptions;
    return this;
}
/**
 * Package-private method that sets the TLS verify mode for this connection.
 * When never called, the connection defaults to {@code VERIFY_PEER_NAME}.
 *
 * @param verifyMode The verification mode.
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
    this.verifyMode = verifyMode;
    return this;
}
/**
 * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
 *
 * @param retryOptions The retry options to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}
/**
 * Package-private: sets the reactor scheduler to use. When never called, an elastic scheduler is created
 * lazily when the shared connection is built.
 *
 * @param scheduler Scheduler to be used.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
    this.scheduler = scheduler;
    return this;
}
/**
 * Sets the transport type by which all communication with Azure Service Bus occurs.
 * Default value is {@link AmqpTransportType#AMQP}.
 *
 * @param transportType The transport type to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
    this.transport = transportType;
    return this;
}
/**
 * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
 *
 * @return A new instance of {@link ServiceBusSenderClientBuilder}.
 */
public ServiceBusSenderClientBuilder sender() {
    return new ServiceBusSenderClientBuilder();
}
/**
 * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
 */
public ServiceBusReceiverClientBuilder receiver() {
    return new ServiceBusReceiverClientBuilder();
}
/**
 * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b>
 * Service Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
 */
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
    return new ServiceBusSessionReceiverClientBuilder();
}
/**
 * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure a
 * {@link ServiceBusProcessorClient} instance.
 *
 * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
 */
public ServiceBusProcessorClientBuilder processor() {
    return new ServiceBusProcessorClientBuilder();
}
/**
 * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus
 * processor instance that processes sessions.
 *
 * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
 */
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
    return new ServiceBusSessionProcessorClientBuilder();
}
/**
 * Called when a child client is closed. When this was the last open client, the shared AMQP connection is
 * disposed and cleared so a later build creates a fresh one.
 */
void onClientClose() {
    synchronized (connectionLock) {
        final int remaining = openClients.decrementAndGet();
        LOGGER.atInfo()
            .addKeyValue("numberOfOpenClients", remaining)
            .log("Closing a dependent client.");
        if (remaining > 0) {
            return;
        }
        // A negative count means close was called more times than clients were created; log and continue so
        // the shared connection is still torn down.
        if (remaining < 0) {
            LOGGER.atWarning()
                .addKeyValue("numberOfOpenClients", remaining)
                .log("There should not be less than 0 clients.");
        }
        LOGGER.info("No more open clients, closing shared connection.");
        if (sharedConnection == null) {
            LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
        } else {
            sharedConnection.dispose();
            sharedConnection = null;
        }
    }
}
/**
 * Lazily creates (or returns) the shared connection processor used by every client built from this builder.
 * Fills in default retry options and an elastic scheduler when none were configured, then — under
 * connectionLock — builds a repeating connection Flux so the processor can obtain a new connection after a
 * failure. Also increments the open-client count used by onClientClose.
 *
 * @param serializer Serializer handed to each new AMQP connection.
 * @return The shared connection processor.
 */
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
    if (retryOptions == null) {
        retryOptions = DEFAULT_RETRY;
    }
    if (scheduler == null) {
        // NOTE(review): Schedulers.elastic() is deprecated in recent Reactor releases in favor of
        // boundedElastic(); confirm the project's Reactor version before changing.
        scheduler = Schedulers.elastic();
    }
    synchronized (connectionLock) {
        if (sharedConnection == null) {
            final ConnectionOptions connectionOptions = getConnectionOptions();
            // Each subscription produces a brand-new connection; .repeat() lets the processor re-subscribe
            // for a replacement connection when the current one terminates.
            final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
                final String connectionId = StringUtil.getRandomString("MF");
                final ReactorProvider provider = new ReactorProvider();
                final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
                final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
                    connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
                    connectionOptions.getAuthorizationScope());
                return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
                    connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
                    crossEntityTransactions);
            }).repeat();
            sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
                connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
        }
    }
    final int numberOfOpenClients = openClients.incrementAndGet();
    // NOTE(review): the following line appears truncated in this copy of the file — upstream it logs the
    // number of open clients sharing the connection; verify against the original source.
    LOGGER.info("
    return sharedConnection;
}
/**
 * Assembles the {@link ConnectionOptions} for the shared AMQP connection from the builder's state.
 * Side effects: lazily clones the global {@link Configuration} into {@code configuration} and resolves
 * {@code proxyOptions} from the environment when unset.
 *
 * @return Connection options, including the custom endpoint host/port when one was configured.
 * @throws IllegalArgumentException if no credentials were set, or a proxy is configured without the
 * web-sockets transport.
 */
private ConnectionOptions getConnectionOptions() {
    configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
    if (credentials == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
            + "They can be set using: connectionString(String), connectionString(String, String), "
            + "or credentials(String, String, TokenCredential)"
        ));
    }
    // Proxies can only be traversed over web sockets; plain AMQP (port 5671) will not work through them.
    if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
        && transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "Cannot use a proxy when TransportType is not AMQP."));
    }
    if (proxyOptions == null) {
        proxyOptions = getDefaultProxyConfiguration(configuration);
    }
    // Shared-key credentials authorize via SAS tokens; everything else is treated as an AAD JWT.
    final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
        ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
        : CbsAuthorizationType.JSON_WEB_TOKEN;
    final SslDomain.VerifyMode verificationMode = verifyMode != null
        ? verifyMode
        : SslDomain.VerifyMode.VERIFY_PEER_NAME;
    final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
    final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
    final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
    final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
    if (customEndpointAddress == null) {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion);
    } else {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
            customEndpointAddress.getPort());
    }
}
/**
 * Resolves proxy settings from the environment when none were configured explicitly: reads the HTTP proxy
 * address from the configuration store and falls back to system defaults when it is absent.
 *
 * @param configuration Configuration store to read proxy settings from.
 * @return Proxy options derived from the configuration, or {@link ProxyOptions#SYSTEM_DEFAULTS}.
 */
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
    // Preserve any authentication type the caller already chose, even though the address was not set.
    final ProxyAuthenticationType authentication = proxyOptions == null
        ? ProxyAuthenticationType.NONE
        : proxyOptions.getAuthentication();
    final String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
    if (CoreUtils.isNullOrEmpty(proxyAddress)) {
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
    return getProxyOptions(authentication, proxyAddress, configuration,
        Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
}
/**
 * Builds {@link ProxyOptions} from a raw proxy address string.
 * <p>
 * When the address looks like {@code host:port}, an explicit HTTP proxy is created with optional
 * username/password read from the configuration store. Otherwise, when {@code java.net.useSystemProxies}
 * is enabled, the JVM/system proxy settings are used; failing that, system defaults are returned.
 *
 * @param authentication Authentication type to apply to the resulting proxy options.
 * @param proxyAddress Raw address, e.g. {@code myproxy:8080}; surrounding whitespace is tolerated.
 * @param configuration Configuration store for username/password lookup.
 * @param useSystemProxies Whether {@code java.net.useSystemProxies} was set to {@code true}.
 * @return The resolved proxy options.
 */
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
    Configuration configuration, boolean useSystemProxies) {
    // Bug fix: the original matched HOST_PORT_PATTERN against proxyAddress.trim() but then split and
    // parsed the UNtrimmed string, so surrounding whitespace (e.g. " host:8080 ") passed the pattern check
    // yet made Integer.parseInt throw. Trim once and use the trimmed value consistently.
    final String trimmedAddress = proxyAddress.trim();
    if (HOST_PORT_PATTERN.matcher(trimmedAddress).find()) {
        final String[] hostPort = trimmedAddress.split(":");
        final String host = hostPort[0];
        final int port = Integer.parseInt(hostPort[1]);
        final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
        final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
        return new ProxyOptions(authentication, proxy, username, password);
    } else if (useSystemProxies) {
        com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
            .fromConfiguration(configuration);
        return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
            coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
    } else {
        LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
            + "set or was false.");
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
}
/**
 * Returns {@code true} when {@code item} is {@code null} or has zero length.
 *
 * @param item String to test; may be {@code null}.
 * @return Whether the string is null or empty.
 */
private static boolean isNullOrEmpty(String item) {
    return item == null || item.length() == 0;
}
/**
 * Determines the messaging entity type from the configured names, validating their combination:
 * at least one of the three must be set, queue and topic are mutually exclusive, and an explicitly set
 * queue/topic must agree with the entity path from the connection string (when one exists). When only the
 * connection string supplied an entity, {@code UNKNOWN} is returned.
 *
 * @param connectionStringEntityName Entity path parsed from the connection string, possibly null/empty.
 * @param topicName Topic configured on the child builder, possibly null/empty.
 * @param queueName Queue configured on the child builder, possibly null/empty.
 * @return The resolved entity type.
 * @throws IllegalStateException when the combination of names is invalid or inconsistent.
 */
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
    String topicName, String queueName) {
    final boolean hasTopicName = !isNullOrEmpty(topicName);
    final boolean hasQueueName = !isNullOrEmpty(queueName);
    final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);
    if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
            "Cannot build client without setting either a queueName or topicName."));
    }
    if (hasQueueName && hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
    }
    if (hasQueueName) {
        if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "queueName (%s) is different than the connectionString's EntityPath (%s).",
                queueName, connectionStringEntityName)));
        }
        return MessagingEntityType.QUEUE;
    }
    if (hasTopicName) {
        if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) is different than the connectionString's EntityPath (%s).",
                topicName, connectionStringEntityName)));
        }
        return MessagingEntityType.SUBSCRIPTION;
    }
    return MessagingEntityType.UNKNOWN;
}
/**
 * Computes the AMQP entity path for the given entity type, appending the dead-letter or
 * transfer-dead-letter suffix when a {@link SubQueue} other than {@code NONE} is requested.
 *
 * @param entityType Resolved entity type (queue or subscription).
 * @param queueName Queue name, used when {@code entityType} is QUEUE.
 * @param topicName Topic name, used when {@code entityType} is SUBSCRIPTION.
 * @param subscriptionName Subscription name; required for SUBSCRIPTION.
 * @param subQueue Optional sub-queue selector; {@code null} behaves like {@code NONE}.
 * @return The entity path to connect to.
 * @throws IllegalStateException when a subscription path is requested without a subscription name.
 * @throws IllegalArgumentException for an unknown entity type or unsupported sub-queue value.
 */
private static String getEntityPath(MessagingEntityType entityType, String queueName,
    String topicName, String subscriptionName, SubQueue subQueue) {
    final String basePath;
    switch (entityType) {
        case QUEUE:
            basePath = queueName;
            break;
        case SUBSCRIPTION:
            if (isNullOrEmpty(subscriptionName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "topicName (%s) must have a subscriptionName associated with it.", topicName)));
            }
            basePath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
                subscriptionName);
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                new IllegalArgumentException("Unknown entity type: " + entityType));
    }
    if (subQueue == null || subQueue == SubQueue.NONE) {
        return basePath;
    }
    switch (subQueue) {
        case TRANSFER_DEAD_LETTER_QUEUE:
            return basePath + TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
        case DEAD_LETTER_QUEUE:
            return basePath + DEAD_LETTER_QUEUE_NAME_SUFFIX;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException(
                "Unsupported value of subqueue type: " + subQueue));
    }
}
/**
 * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish
 * messages to Service Bus.
 *
 * @see ServiceBusSenderAsyncClient
 * @see ServiceBusSenderClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
    // Exactly one of these may be set; validateEntityPaths enforces this at build time.
    private String queueName;
    private String topicName;

    private ServiceBusSenderClientBuilder() {
    }

    /**
     * Sets the name of the Service Bus queue to publish messages to.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the name of the Service Bus topic to publish messages to.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting
     * {@link ServiceBusMessage} to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service Bus queue or topic.
     * @throws IllegalStateException if neither queue name nor topic name is set, or both are set, or the
     * configured name conflicts with the connection string's entity path.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderAsyncClient buildAsyncClient() {
        final ServiceBusConnectionProcessor processor = getOrCreateConnectionProcessor(messageSerializer);
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityName;
        if (entityType == MessagingEntityType.QUEUE) {
            entityName = queueName;
        } else if (entityType == MessagingEntityType.SUBSCRIPTION) {
            entityName = topicName;
        } else if (entityType == MessagingEntityType.UNKNOWN) {
            // Name came only from the connection string's EntityPath.
            entityName = connectionStringEntityName;
        } else {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Unknown entity type: " + entityType));
        }
        return new ServiceBusSenderAsyncClient(entityName, entityType, processor, retryOptions,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
    }

    /**
     * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting
     * {@link ServiceBusMessage} to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderClient} for transmitting to a Service Bus queue or topic.
     * @throws IllegalStateException if neither queue name nor topic name is set, or both are set, or the
     * configured name conflicts with the connection string's entity path.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderClient buildClient() {
        return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
    }
}
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
* entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link
* and {@link
* next session to process.
*
* <p>
* By default, the processor:
* <ul>
* <li>Automatically settles messages. Disabled via {@link
* <li>Processes 1 session concurrently. Configured via {@link
* <li>Invokes 1 instance of {@link
* {@link
* </ul>
*
 * <p><strong>Instantiate a session-enabled processor client</strong></p>
 * <pre>
 * Consumer&lt;ServiceBusReceivedMessageContext&gt; onMessage = context -&gt; {
 *     ServiceBusReceivedMessage message = context.getMessage();
 *     System.out.printf("Processing message. Session: %s, Sequence #: %s. Contents: %s%n",
 *         message.getSessionId(), message.getSequenceNumber(), message.getBody());
 * };
 *
 * Consumer&lt;ServiceBusErrorContext&gt; onError = context -&gt; {
 *     System.out.printf("Error when receiving messages from namespace: '%s'. Entity: '%s'%n",
 *         context.getFullyQualifiedNamespace(), context.getEntityPath());
 *
 *     if (context.getException() instanceof ServiceBusException) {
 *         ServiceBusException exception = (ServiceBusException) context.getException();
 *         System.out.printf("Error source: %s, reason %s%n", context.getErrorSource(),
 *             exception.getReason());
 *     } else {
 *         System.out.printf("Error occurred: %s%n", context.getException());
 *     }
 * };
 *
 * // Retrieve 'connectionString' and 'queueName' from your configuration.
 *
 * ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder()
 *     .connectionString(connectionString)
 *     .sessionProcessor()
 *     .queueName(queueName)
 *     .maxConcurrentSessions(2)
 *     .processMessage(onMessage)
 *     .processError(onError)
 *     .buildProcessorClient();
 *
 * // Start the processor in the background.
 * sessionProcessor.start();
 * </pre>
*
* @see ServiceBusProcessorClient
*/
public final class ServiceBusSessionProcessorClientBuilder {
// Processor-level options (max concurrent calls, auto-complete, tracing) set at construction/build time.
private final ServiceBusProcessorClientOptions processorClientOptions;
// Underlying session receiver builder that this processor builder delegates entity configuration to.
private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
// Callback invoked for each received message.
private Consumer<ServiceBusReceivedMessageContext> processMessage;
// Callback invoked when an error occurs while receiving messages.
private Consumer<ServiceBusErrorContext> processError;
// Defaults: 1 concurrent call per session and 1 concurrent session, both overridable via the setters.
private ServiceBusSessionProcessorClientBuilder() {
    sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
    processorClientOptions = new ServiceBusProcessorClientOptions()
        .setMaxConcurrentCalls(1)
        .setTracerProvider(tracerProvider);
    sessionReceiverClientBuilder.maxConcurrentSessions(1);
}
/**
 * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration#ZERO} or
 * {@code null} disables auto-renewal.
 *
 * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
 * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
 *
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
 */
public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
    validateAndThrow(maxAutoLockRenewDuration);
    sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
    return this;
}
/**
 * Enables session processing roll-over by processing at most {@code maxConcurrentSessions} sessions.
 *
 * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
 */
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
    if (maxConcurrentSessions < 1) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
    }
    sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
    return this;
}
/**
 * Sets the prefetch count of the processor. Prefetch speeds up the message flow by having messages readily
 * available for local retrieval before the application asks for them. Setting a non-zero value prefetches
 * that number of messages; setting the value to zero turns prefetch off. Note that a non-zero prefetch
 * risks message loss in receive-and-delete scenarios even though it improves throughput.
 *
 * @param prefetchCount The prefetch count.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
    sessionReceiverClientBuilder.prefetchCount(prefetchCount);
    return this;
}
/**
 * Sets the name of the queue to create a processor for.
 *
 * @param queueName Name of the queue.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
    sessionReceiverClientBuilder.queueName(queueName);
    return this;
}
/**
 * Sets the receive mode for the processor.
 *
 * @param receiveMode Mode for receiving messages.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
    sessionReceiverClientBuilder.receiveMode(receiveMode);
    return this;
}
/**
 * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide
 * a secondary sub-queue, called a dead-letter queue (DLQ).
 *
 * @param subQueue The type of the sub queue.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 * @see SubQueue
 */
public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
    this.sessionReceiverClientBuilder.subQueue(subQueue);
    return this;
}
/**
 * Sets the name of the subscription in the topic to listen to.
 * <b>A topic name must also be set for this to apply.</b>
 *
 * @param subscriptionName Name of the subscription.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
    sessionReceiverClientBuilder.subscriptionName(subscriptionName);
    return this;
}
/**
 * Sets the name of the topic. <b>A subscription name must also be set for this to apply.</b>
 *
 * @param topicName Name of the topic.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
    sessionReceiverClientBuilder.topicName(topicName);
    return this;
}
/**
 * The message processing callback for the processor that will be executed when a message is received.
 *
 * @param processMessage The message processing consumer that will be executed when a message is received.
 *
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder processMessage(
    Consumer<ServiceBusReceivedMessageContext> processMessage) {
    this.processMessage = processMessage;
    return this;
}
/**
 * The error handler for the processor which will be invoked in the event of an error while receiving messages.
 *
 * @param processError The error handler which will be executed when an error occurs.
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder processError(
    Consumer<ServiceBusErrorContext> processError) {
    this.processError = processError;
    return this;
}
/**
 * Max concurrent messages that this processor should process.
 *
 * @param maxConcurrentCalls max concurrent messages that this processor should process.
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
 */
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
    // Accept only a positive concurrency level; anything else is a caller error.
    if (maxConcurrentCalls >= 1) {
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }
    throw LOGGER.logExceptionAsError(
        new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
/**
 * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
 * message is completed, and a message whose processing throws is abandoned.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
    // Both the receive pipeline and the processor options must agree that auto-settlement is off.
    sessionReceiverClientBuilder.disableAutoComplete();
    processorClientOptions.setDisableAutoComplete(true);
    return this;
}
/**
 * Creates a <b>session-aware</b> Service Bus processor responsible for reading
 * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
 * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not set.
 */
public ServiceBusProcessorClient buildProcessorClient() {
    // Fail fast on missing callbacks before handing configuration off to the processor.
    final Consumer<ServiceBusReceivedMessageContext> messageHandler =
        Objects.requireNonNull(processMessage, "'processMessage' cannot be null");
    final Consumer<ServiceBusErrorContext> errorHandler =
        Objects.requireNonNull(processError, "'processError' cannot be null");
    return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
        sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
        sessionReceiverClientBuilder.subscriptionName, messageHandler, errorHandler,
        processorClientOptions);
}
}
/**
 * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
 * messages from a <b>session aware</b> Service Bus entity.
 *
 * @see ServiceBusReceiverAsyncClient
 * @see ServiceBusReceiverClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusSessionReceiverClientBuilder {
    private boolean enableAutoComplete = true;
    private Integer maxConcurrentSessions = null;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
    private SubQueue subQueue = SubQueue.NONE;

    private ServiceBusSessionReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed, and a message whose processing throws is abandoned.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
     * auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
     *     {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "maxConcurrentSessions cannot be less than 1."));
        }
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver.
     *
     * <p>Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval
     * when and before the application asks for one. Setting a non-zero value will prefetch that number of
     * messages. Setting the value to zero turns prefetch off.</p>
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide
     * a secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>The topic name must also be set for a subscription to be resolvable.</b>
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic. <b>The subscription name must also be set to receive from a topic.</b>
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver used internally by the
     * processor. Responsible for reading {@link ServiceBusMessage messages} from a specific queue or
     * subscription.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if queue name and topic name are both set, or neither is set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);
        // RECEIVE_AND_DELETE settles on delivery: auto-complete is redundant and lock renewal meaningless.
        if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null,
            maxConcurrentSessions);
        final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
            connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
            entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
     *     subscription.
     * @throws IllegalStateException if queue name and topic name are both set, or neither is set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if queue name and topic name are both set, or neither is set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusSessionReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
            isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    // Shared construction path for the sync and async session receivers. isAutoCompleteAllowed is false for
    // the synchronous client, which cannot support auto-complete outside callback receive.
    private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        // FIX: honor the sub-queue configured through subQueue(SubQueue). Previously SubQueue.NONE was
        // hard-coded here, silently ignoring the setter, while buildAsyncClientForProcessor() used the field.
        // The field defaults to SubQueue.NONE, so behavior is unchanged for callers that never set it.
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);
        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
        return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
 * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
 * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies
 * the message processing callback when a message is received or the error handler when an error is observed.
 * To create an instance, configure the two callbacks - {@code processMessage} and {@code processError}. By
 * default the processor is created with auto-completion and auto-lock renewal capabilities.
 *
 * <p><strong>Sample code to instantiate a processor client</strong></p>
 * <pre>{@code
 * ServiceBusProcessorClient processor = new ServiceBusClientBuilder()
 *     .connectionString(connectionString)
 *     .processor()
 *     .queueName(queueName)
 *     .processMessage(context -> System.out.println(context.getMessage().getBody()))
 *     .processError(context -> System.err.println(context.getException()))
 *     .buildProcessorClient();
 * processor.start();
 * }</pre>
 *
 * @see ServiceBusProcessorClient
 */
public final class ServiceBusProcessorClientBuilder {
    private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
    private final ServiceBusProcessorClientOptions processorClientOptions;
    private Consumer<ServiceBusReceivedMessageContext> processMessage;
    private Consumer<ServiceBusErrorContext> processError;

    private ServiceBusProcessorClientBuilder() {
        serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
        // Processor defaults: one concurrent call, tracing wired to the outer builder's provider.
        processorClientOptions = new ServiceBusProcessorClientOptions()
            .setMaxConcurrentCalls(1)
            .setTracerProvider(tracerProvider);
    }

    /**
     * Sets the prefetch count of the processor.
     *
     * <p>Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval
     * when and before the application starts the processor. Setting a non-zero value will prefetch that number
     * of messages. Setting the value to zero turns prefetch off.</p>
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
        serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
        return this;
    }

    /**
     * Sets the name of the queue to create a processor for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder queueName(String queueName) {
        serviceBusReceiverClientBuilder.queueName(queueName);
        return this;
    }

    /**
     * Sets the receive mode for the processor.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        serviceBusReceiverClientBuilder.receiveMode(receiveMode);
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide
     * a secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
        serviceBusReceiverClientBuilder.subQueue(subQueue);
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>The topic name must also be set for a subscription to be resolvable.</b>
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
        serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
        return this;
    }

    /**
     * Sets the name of the topic. <b>The subscription name must also be set to receive from a topic.</b>
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder topicName(String topicName) {
        serviceBusReceiverClientBuilder.topicName(topicName);
        return this;
    }

    /**
     * The message processing callback for the processor which will be executed when a message is received.
     *
     * @param processMessage The message processing consumer that will be executed when a message is received.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder processMessage(
        Consumer<ServiceBusReceivedMessageContext> processMessage) {
        this.processMessage = processMessage;
        return this;
    }

    /**
     * The error handler for the processor which will be invoked in the event of an error while receiving
     * messages.
     *
     * @param processError The error handler which will be executed when an error occurs.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
        this.processError = processError;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
     * auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     *     {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
        return this;
    }

    /**
     * Max concurrent messages that this processor should process. By default, this is set to 1.
     *
     * @param maxConcurrentCalls max concurrent messages that this processor should process.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
     */
    public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
        if (maxConcurrentCalls < 1) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
        }
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed, and a message whose processing throws is abandoned.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder disableAutoComplete() {
        // Both the receive pipeline and the processor options must agree that auto-settlement is off.
        serviceBusReceiverClientBuilder.disableAutoComplete();
        processorClientOptions.setDisableAutoComplete(true);
        return this;
    }

    /**
     * Creates Service Bus message processor responsible for reading
     * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
     * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not
     *     set.
     * @throws IllegalStateException if queue name and topic name are both set, or neither is set.
     */
    public ServiceBusProcessorClient buildProcessorClient() {
        return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
            serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
            serviceBusReceiverClientBuilder.subscriptionName,
            Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
            Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
    }
}
/**
 * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
 * messages from Service Bus.
 *
 * @see ServiceBusReceiverAsyncClient
 * @see ServiceBusReceiverClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusReceiverClientBuilder {
    private boolean enableAutoComplete = true;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    // NOTE(review): defaults to null here, whereas the session builder initializes its field to
    // SubQueue.NONE. Presumably getEntityPath treats null like NONE - confirm, or initialize for consistency.
    private SubQueue subQueue;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

    private ServiceBusReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed, and a message whose processing throws is abandoned.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
     * auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     *     {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     * @return The updated {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver.
     *
     * <p>Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval
     * when and before the application asks for one. Setting a non-zero value will prefetch that number of
     * messages. Setting the value to zero turns prefetch off.</p>
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to.
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>The topic name must also be set for a subscription to be resolvable.</b>
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic. <b>The subscription name must also be set to receive from a topic.</b>
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if queue name and topic name are both set, or neither is set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates <b>synchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if queue name and topic name are both set, or neither is set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusReceiverClient(buildAsyncClient(false),
            isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    // Shared construction path for the sync and async receivers. isAutoCompleteAllowed is false for the
    // synchronous client, which cannot support auto-complete outside callback receive.
    ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);
        // RECEIVE_AND_DELETE settles on delivery: auto-complete is redundant and lock renewal meaningless.
        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete);
        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
            entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
 * Validates that {@code prefetchCount} is not negative.
 *
 * @param prefetchCount The prefetch count to validate.
 * @throws IllegalArgumentException if {@code prefetchCount} is negative.
 */
private void validateAndThrow(int prefetchCount) {
    if (prefetchCount >= 0) {
        return;
    }
    throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
        "prefetchCount (%s) cannot be less than 0.", prefetchCount)));
}
/**
 * Validates that {@code maxLockRenewalDuration}, when supplied, is not negative. A {@code null} duration is
 * accepted (it means auto-renewal is disabled).
 *
 * @param maxLockRenewalDuration The renewal duration to validate; may be {@code null}.
 * @throws IllegalArgumentException if the duration is negative.
 */
private void validateAndThrow(Duration maxLockRenewalDuration) {
    final boolean isNegative = maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative();
    if (isNegative) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    }
}
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
private static final AmqpRetryOptions DEFAULT_RETRY =
new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
private static final int DEFAULT_PREFETCH_COUNT = 0;
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
private ClientOptions clientOptions;
private Configuration configuration;
private ServiceBusConnectionProcessor sharedConnection;
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
* Keeps track of the open clients that were created from this builder when there is a shared connection.
*/
private final AtomicInteger openClients = new AtomicInteger();
/**
* Creates a new instance with the default transport {@link AmqpTransportType
*/
public ServiceBusClientBuilder() {
}
/**
* Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
* certain properties, as well as support the addition of custom header information. Refer to the {@link
* ClientOptions} documentation for more information.
*
* @param clientOptions to be set on the client.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
 * Sets the fully-qualified namespace for the Service Bus.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    return this;
}

/**
 * Returns the configured fully-qualified namespace, or throws if it was never set.
 * NOTE(review): a never-set (null) namespace also takes this path, so the "empty string"
 * message is slightly imprecise in that case.
 */
private String getAndValidateFullyQualifiedNamespace() {
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    return fullyQualifiedNamespace;
}
/**
* Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
* does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
* an intermediary. For example: {@literal https:
* <p>
* If no port is specified, the default port for the {@link
* used.
*
* @param customEndpointAddress The custom endpoint address.
* @return The updated {@link ServiceBusClientBuilder} object.
* @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
*/
/**
 * Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws AzureException if the shared-key credential cannot be created from the connection string.
 */
public ServiceBusClientBuilder connectionString(String connectionString) {
    final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
    final TokenCredential tokenCredential;
    try {
        tokenCredential = getTokenCredential(properties);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
    }
    this.fullyQualifiedNamespace = properties.getEndpoint().getHost();
    String entityPath = properties.getEntityPath();
    if (!CoreUtils.isNullOrEmpty(entityPath)) {
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Setting entity from connection string.");
        // Remember the EntityPath so later queue/topic name validation can cross-check against it.
        this.connectionStringEntityName = entityPath;
    }
    return credential(properties.getEndpoint().getHost(), tokenCredential);
}
/**
 * Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
 * scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
 * one 'send-via' entity on server side as explained next.
 * Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
 * entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
 * perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
 * their first operation need to either be senders, or if they are receivers they need to be on the same entity as
 * the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
 * that the transaction is committed because it cannot route a receive operation through a different entity). For
 * instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
 * cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
 * first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
 *
 * <p><strong>Avoid using non-transaction API on this client</strong></p>
 * Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
 * have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
 * entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
 *
 * <p><strong>When not to enable this feature</strong></p>
 * If your transaction is involved in one Service bus entity only. For example you are receiving from one
 * queue/subscription and you want to settle your own messages which are part of one transaction.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder enableCrossEntityTransactions() {
    // The flag is consumed when the shared AMQP connection is created in getOrCreateConnectionProcessor.
    this.crossEntityTransactions = true;
    return this;
}
/**
 * Builds a shared-key credential from connection-string properties: either from the
 * key-name/key pair (with the standard token validity) or, when a pre-computed shared
 * access signature is present, directly from that signature.
 */
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
    return properties.getSharedAccessSignature() == null
        ? new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
            properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY)
        : new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
}
/**
 * Sets the configuration store that is used during construction of the service client.
 *
 * If not specified, the default configuration store is used to configure Service Bus clients. Use
 * {@link Configuration#NONE} to bypass using configuration settings during construction.
 *
 * @param configuration The configuration store used to configure Service Bus clients.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}
/**
 * Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
 * The azure-identity library has multiple {@link TokenCredential} implementations that can be used to
 * authenticate the access to the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
    // Validate all arguments before mutating builder state so a failed call leaves the builder
    // unchanged (the original assigned both fields first and then threw on an empty namespace,
    // leaving the builder partially updated).
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = credential;
    return this;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * identity and authentication documentation for more details on proper usage of the {@link TokenCredential} type.
 *
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
    this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'. {@link AzureNamedKeyCredential#update(String, String)} allows
 * you to update the name and key.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
    // Validate all arguments before mutating builder state so a failed call leaves the builder
    // unchanged (the original assigned the namespace first and then threw on an empty string).
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'. {@link AzureNamedKeyCredential#update(String, String)} allows
 * you to update the name and key.
 *
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to the Service Bus access control with Shared Access Signatures documentation.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
    // Validate all arguments before mutating builder state so a failed call leaves the builder
    // unchanged (the original assigned the namespace first and then threw on an empty string).
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to the Service Bus access control with Shared Access Signatures documentation.
 *
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured,
 * {@link AmqpTransportType#AMQP_WEB_SOCKETS} must be used for the transport type.
 *
 * @param proxyOptions The proxy configuration to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
    this.proxyOptions = proxyOptions;
    return this;
}

/**
 * Package-private method that sets the verify mode for this connection.
 *
 * @param verifyMode The verification mode.
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
    this.verifyMode = verifyMode;
    return this;
}
/**
 * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
 *
 * @param retryOptions The retry options to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}

/**
 * Package-private: sets the scheduler used for the shared connection. Defaults to an elastic
 * scheduler when not set (see getOrCreateConnectionProcessor).
 *
 * @param scheduler Scheduler to be used.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
    this.scheduler = scheduler;
    return this;
}

/**
 * Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is
 * {@link AmqpTransportType#AMQP}.
 *
 * @param transportType The transport type to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
    this.transport = transportType;
    return this;
}
/**
 * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
 *
 * @return A new instance of {@link ServiceBusSenderClientBuilder}.
 */
public ServiceBusSenderClientBuilder sender() {
    return new ServiceBusSenderClientBuilder();
}

/**
 * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
 */
public ServiceBusReceiverClientBuilder receiver() {
    return new ServiceBusReceiverClientBuilder();
}

/**
 * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service
 * Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
 */
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
    return new ServiceBusSessionReceiverClientBuilder();
}

/**
 * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient}
 * instance.
 *
 * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
 */
public ServiceBusProcessorClientBuilder processor() {
    return new ServiceBusProcessorClientBuilder();
}

/**
 * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
 * instance that processes sessions.
 *
 * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
 */
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
    return new ServiceBusSessionProcessorClientBuilder();
}
/**
 * Called when a child client is closed. Decrements the reference count and disposes of the
 * shared connection once no clients remain open.
 */
void onClientClose() {
    synchronized (connectionLock) {
        final int remaining = openClients.decrementAndGet();
        LOGGER.atInfo()
            .addKeyValue("numberOfOpenClients", remaining)
            .log("Closing a dependent client.");
        if (remaining > 0) {
            // Other clients still share the connection; keep it alive.
            return;
        }
        if (remaining < 0) {
            // A double-close happened somewhere; log but continue tearing down.
            LOGGER.atWarning()
                .addKeyValue("numberOfOpenClients", remaining)
                .log("There should not be less than 0 clients.");
        }
        LOGGER.info("No more open clients, closing shared connection.");
        if (sharedConnection == null) {
            LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
        } else {
            sharedConnection.dispose();
            sharedConnection = null;
        }
    }
}
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
if (retryOptions == null) {
retryOptions = DEFAULT_RETRY;
}
if (scheduler == null) {
scheduler = Schedulers.elastic();
}
synchronized (connectionLock) {
if (sharedConnection == null) {
final ConnectionOptions connectionOptions = getConnectionOptions();
final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
final String connectionId = StringUtil.getRandomString("MF");
final ReactorProvider provider = new ReactorProvider();
final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
connectionOptions.getAuthorizationScope());
return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
crossEntityTransactions);
}).repeat();
sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
}
}
final int numberOfOpenClients = openClients.incrementAndGet();
LOGGER.info("
return sharedConnection;
}
/**
 * Builds the {@link ConnectionOptions} for the shared connection from the builder's current state.
 * Validates that credentials are present and that a configured proxy is only used with the
 * web-sockets transport, then resolves authorization type, TLS verification mode, client options,
 * and product/version properties.
 *
 * @throws IllegalArgumentException if credentials are unset, or a proxy is configured with a
 * non-web-socket transport.
 */
private ConnectionOptions getConnectionOptions() {
    configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
    if (credentials == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
            + "They can be set using: connectionString(String), connectionString(String, String), "
            + "or credentials(String, String, TokenCredential)"
        ));
    }
    if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
        && transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "Cannot use a proxy when TransportType is not AMQP."));
    }
    if (proxyOptions == null) {
        proxyOptions = getDefaultProxyConfiguration(configuration);
    }
    // Shared-key credentials authorize via SAS; everything else is treated as an AAD JWT.
    final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
        ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
        : CbsAuthorizationType.JSON_WEB_TOKEN;
    final SslDomain.VerifyMode verificationMode = verifyMode != null
        ? verifyMode
        : SslDomain.VerifyMode.VERIFY_PEER_NAME;
    final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
    final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
    final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
    final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
    // The custom endpoint, when set, is passed as an additional host/port pair.
    if (customEndpointAddress == null) {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion);
    } else {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
            customEndpointAddress.getPort());
    }
}
/**
 * Derives proxy options from the configuration store's HTTP_PROXY setting, falling back to
 * system defaults when no proxy address is configured.
 * NOTE(review): the only visible call site invokes this after checking proxyOptions == null,
 * which would make the non-null branch below dead code — confirm before removing.
 */
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
    ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
    if (proxyOptions != null) {
        authentication = proxyOptions.getAuthentication();
    }
    String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
    if (CoreUtils.isNullOrEmpty(proxyAddress)) {
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
    return getProxyOptions(authentication, proxyAddress, configuration,
        Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
}
/**
 * Parses a "host:port" proxy address into {@link ProxyOptions}, or resolves system proxies when
 * permitted, otherwise falls back to system defaults.
 * NOTE(review): splitting on every ':' assumes a plain host:port form — an IPv6 literal or a
 * scheme-prefixed address would not parse correctly here; verify HOST_PORT_PATTERN excludes those.
 */
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
    Configuration configuration, boolean useSystemProxies) {
    String host;
    int port;
    if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
        final String[] hostPort = proxyAddress.split(":");
        host = hostPort[0];
        port = Integer.parseInt(hostPort[1]);
        final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
        final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
        return new ProxyOptions(authentication, proxy, username, password);
    } else if (useSystemProxies) {
        // Delegate to azure-core's proxy resolution, then adapt to the AMQP ProxyOptions type.
        com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
            .fromConfiguration(configuration);
        return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
            coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
    } else {
        LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
            + "set or was false.");
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
}
/**
 * Returns {@code true} when {@code item} is {@code null} or has zero length.
 */
private static boolean isNullOrEmpty(String item) {
    if (item == null) {
        return true;
    }
    return item.isEmpty();
}
/**
 * Determines the entity type from the configured names, enforcing that exactly one entity is
 * targeted and that an explicitly-set queue/topic name agrees with the connection string's
 * EntityPath. Returns UNKNOWN when only the connection string supplied an entity.
 *
 * @throws IllegalStateException if no entity is configured, both queue and topic are set, or a
 * name conflicts with the connection string's EntityPath.
 */
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
    String topicName, String queueName) {
    final boolean hasTopicName = !isNullOrEmpty(topicName);
    final boolean hasQueueName = !isNullOrEmpty(queueName);
    final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);
    final MessagingEntityType entityType;
    if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
            "Cannot build client without setting either a queueName or topicName."));
    } else if (hasQueueName && hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
    } else if (hasQueueName) {
        if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "queueName (%s) is different than the connectionString's EntityPath (%s).",
                queueName, connectionStringEntityName)));
        }
        entityType = MessagingEntityType.QUEUE;
    } else if (hasTopicName) {
        if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) is different than the connectionString's EntityPath (%s).",
                topicName, connectionStringEntityName)));
        }
        entityType = MessagingEntityType.SUBSCRIPTION;
    } else {
        // Only the connection string named an entity; the concrete type is resolved later.
        entityType = MessagingEntityType.UNKNOWN;
    }
    return entityType;
}
/**
 * Resolves the AMQP entity path for the given entity type, appending the dead-letter or
 * transfer-dead-letter suffix when a sub-queue is requested.
 *
 * @throws IllegalStateException if a subscription path is requested without a subscription name.
 * @throws IllegalArgumentException if the entity type or sub-queue value is not recognized.
 */
private static String getEntityPath(MessagingEntityType entityType, String queueName,
    String topicName, String subscriptionName, SubQueue subQueue) {
    final String basePath;
    if (entityType == MessagingEntityType.QUEUE) {
        basePath = queueName;
    } else if (entityType == MessagingEntityType.SUBSCRIPTION) {
        if (isNullOrEmpty(subscriptionName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) must have a subscriptionName associated with it.", topicName)));
        }
        basePath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
            subscriptionName);
    } else {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
            new IllegalArgumentException("Unknown entity type: " + entityType));
    }
    if (subQueue == null || subQueue == SubQueue.NONE) {
        return basePath;
    }
    if (subQueue == SubQueue.TRANSFER_DEAD_LETTER_QUEUE) {
        return basePath + TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
    }
    if (subQueue == SubQueue.DEAD_LETTER_QUEUE) {
        return basePath + DEAD_LETTER_QUEUE_NAME_SUFFIX;
    }
    throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: "
        + subQueue));
}
/**
 * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
 * to Service Bus.
 *
 * @see ServiceBusSenderAsyncClient
 * @see ServiceBusSenderClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
    private String queueName;
    private String topicName;

    private ServiceBusSenderClientBuilder() {
    }

    /**
     * Sets the name of the Service Bus queue to publish messages to.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the name of the Service Bus topic to publish messages to.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
     * ServiceBusMessage} to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if neither queueName nor topicName is set, both are set, or either
     * disagrees with the EntityPath in the connection string.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderAsyncClient buildAsyncClient() {
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityName;
        switch (entityType) {
            case QUEUE:
                entityName = queueName;
                break;
            case SUBSCRIPTION:
                entityName = topicName;
                break;
            case UNKNOWN:
                // Neither name was set explicitly; fall back to the connection string's EntityPath.
                entityName = connectionStringEntityName;
                break;
            default:
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Unknown entity type: " + entityType));
        }
        return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
    }

    /**
     * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
     * to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if neither queueName nor topicName is set, both are set, or either
     * disagrees with the EntityPath in the connection string.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderClient buildClient() {
        return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
    }
}
/**
 * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
 * entity. {@link ServiceBusProcessorClient} processes messages and errors via the {@code processMessage} and
 * {@code processError} callbacks. When a session's receiver completes, the processor rolls over to the
 * next session to process.
 *
 * <p>
 * By default, the processor:
 * <ul>
 * <li>Automatically settles messages. Disabled via {@code disableAutoComplete()}.</li>
 * <li>Processes 1 session concurrently. Configured via {@code maxConcurrentSessions(int)}.</li>
 * <li>Invokes 1 instance of the message callback at a time. Configured via {@code maxConcurrentCalls(int)}.</li>
 * </ul>
 *
 * <p><strong>Instantiate a session-enabled processor client</strong></p>
 * <pre>{@code
 * Consumer<ServiceBusReceivedMessageContext> onMessage = context -> {
 *     ServiceBusReceivedMessage message = context.getMessage();
 *     System.out.printf("Processing message. Session: %s%n", message.getSessionId());
 * };
 *
 * Consumer<ServiceBusErrorContext> onError = context -> {
 *     System.out.printf("Error occurred. Namespace: %s%n", context.getFullyQualifiedNamespace());
 *     if (context.getException() instanceof ServiceBusException) {
 *         ServiceBusException exception = (ServiceBusException) context.getException();
 *         System.out.printf("Reason: %s%n", exception.getReason());
 *     }
 * };
 *
 * ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder()
 *     .connectionString(connectionString)
 *     .sessionProcessor()
 *     .queueName(queueName)
 *     .maxConcurrentSessions(2)
 *     .processMessage(onMessage)
 *     .processError(onError)
 *     .buildProcessorClient();
 *
 * // Start the processor in the background.
 * sessionProcessor.start();
 * }</pre>
 *
 * @see ServiceBusProcessorClient
 */
public final class ServiceBusSessionProcessorClientBuilder {
    private final ServiceBusProcessorClientOptions processorClientOptions;
    private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
    private Consumer<ServiceBusReceivedMessageContext> processMessage;
    private Consumer<ServiceBusErrorContext> processError;

    // Defaults: 1 concurrent call and 1 concurrent session; both overridable via the setters below.
    private ServiceBusSessionProcessorClientBuilder() {
        sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
        processorClientOptions = new ServiceBusProcessorClientOptions()
            .setMaxConcurrentCalls(1)
            .setTracerProvider(tracerProvider);
        sessionReceiverClientBuilder.maxConcurrentSessions(1);
    }
    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO}
     * or {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE},
     * auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration#ZERO}
     * or {@code null} indicates that auto-renewal is disabled.
     *
     * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
        return this;
    }
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
}
sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
return this;
}
    /**
     * Sets the prefetch count of the processor. This applies to both PEEK_LOCK and
     * RECEIVE_AND_DELETE receive modes.
     *
     * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
     * and before the application starts the processor.
     * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
     * Using a non-zero prefetch risks losing messages even though it has better performance.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
        sessionReceiverClientBuilder.prefetchCount(prefetchCount);
        return this;
    }

    /**
     * Sets the name of the queue to create a processor for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
        sessionReceiverClientBuilder.queueName(queueName);
        return this;
    }
    /**
     * Sets the receive mode for the processor.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        sessionReceiverClientBuilder.receiveMode(receiveMode);
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
     * secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
        this.sessionReceiverClientBuilder.subQueue(subQueue);
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to. <b>Required when a topic name
     * is set.</b>
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
        sessionReceiverClientBuilder.subscriptionName(subscriptionName);
        return this;
    }

    /**
     * Sets the name of the topic. <b>Requires a subscription name to also be set.</b>
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
     */
    public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
        sessionReceiverClientBuilder.topicName(topicName);
        return this;
    }
/**
 * The message processing callback for the processor that will be executed when a message is received.
 *
 * @param processMessage The message processing consumer that will be executed when a message is received.
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder processMessage(
    Consumer<ServiceBusReceivedMessageContext> processMessage) {
    this.processMessage = processMessage;
    return this;
}
/**
 * The error handler for the processor which will be invoked in the event of an error while receiving messages.
 *
 * @param processError The error handler which will be executed when an error occurs.
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder processError(
    Consumer<ServiceBusErrorContext> processError) {
    this.processError = processError;
    return this;
}
/**
 * Max concurrent messages that this processor should process.
 *
 * @param maxConcurrentCalls max concurrent messages that this processor should process.
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
 */
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
    // Valid values are 1 and above; reject everything else before touching the options.
    if (maxConcurrentCalls >= 1) {
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }
    throw LOGGER.logExceptionAsError(
        new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
/**
 * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message
 * is completed, and a message that fails processing is abandoned.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
    // Both the underlying receiver and the processor options must agree on this setting.
    sessionReceiverClientBuilder.disableAutoComplete();
    processorClientOptions.setDisableAutoComplete(true);
    return this;
}
/**
 * Creates a <b>session-aware</b> Service Bus processor responsible for reading
 * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
 * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not set.
 */
public ServiceBusProcessorClient buildProcessorClient() {
    // Fail fast with clear messages if either callback is missing.
    final Consumer<ServiceBusReceivedMessageContext> messageHandler =
        Objects.requireNonNull(processMessage, "'processMessage' cannot be null");
    final Consumer<ServiceBusErrorContext> errorHandler =
        Objects.requireNonNull(processError, "'processError' cannot be null");
    return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
        sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
        sessionReceiverClientBuilder.subscriptionName, messageHandler, errorHandler,
        processorClientOptions);
}
}
/**
 * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
 * messages from a <b>session aware</b> Service Bus entity.
 *
 * @see ServiceBusReceiverAsyncClient
 * @see ServiceBusReceiverClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusSessionReceiverClientBuilder {
    // Auto-complete is on by default; disableAutoComplete() turns it off.
    private boolean enableAutoComplete = true;
    // null means "acquire one session at a time"; set via maxConcurrentSessions(int) for roll-over processing.
    private Integer maxConcurrentSessions = null;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
    private SubQueue subQueue = SubQueue.NONE;

    private ServiceBusSessionReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed, and a message that fails processing is abandoned.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. In RECEIVE_AND_DELETE mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
     * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "maxConcurrentSessions cannot be less than 1."));
        }
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver. Prefetch speeds up the message flow by aiming to have a
     * message readily available for local retrieval before the application asks for one. Setting a non-zero
     * value will prefetch that number of messages; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide
     * a secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>{@code topicName} must also be set.</b>
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic. <b>{@code subscriptionName} must also be set.</b>
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver used internally by the
     * session processor.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        // Unlike buildAsyncClient(boolean) below, this honors the configured subQueue.
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);
        if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        // Lock renewal is meaningless in RECEIVE_AND_DELETE mode since there is no lock to renew.
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null,
            maxConcurrentSessions);
        final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
            connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
            entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusSessionReceiverClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusSessionReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
            isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    // Shared construction path for the sync and async session receivers. isAutoCompleteAllowed is false when
    // building for the synchronous client, where auto-complete is only supported through callback receive.
    private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        // NOTE(review): uses SubQueue.NONE here, ignoring any subQueue(...) setting, unlike
        // buildAsyncClientForProcessor() above — confirm this is intentional.
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            SubQueue.NONE);
        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
        return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
 * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
 * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies
 * the message processing callback when a message is received or the error handler when an error is observed.
 * To create an instance, configure the two callbacks via {@code processMessage} and {@code processError}.
 * By default, the processor is created with auto-completion and auto-lock renewal capabilities.
 *
 * @see ServiceBusProcessorClient
 */
public final class ServiceBusProcessorClientBuilder {
    private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
    private final ServiceBusProcessorClientOptions processorClientOptions;
    private Consumer<ServiceBusReceivedMessageContext> processMessage;
    private Consumer<ServiceBusErrorContext> processError;

    private ServiceBusProcessorClientBuilder() {
        serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
        // One concurrent call unless maxConcurrentCalls(int) overrides it.
        processorClientOptions = new ServiceBusProcessorClientOptions()
            .setMaxConcurrentCalls(1)
            .setTracerProvider(tracerProvider);
    }

    /**
     * Sets the prefetch count of the processor. Prefetch speeds up the message flow by aiming to have a
     * message readily available for local retrieval before the application starts the processor. Setting a
     * non-zero value will prefetch that number of messages; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
        serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
        return this;
    }

    /**
     * Sets the name of the queue to create a processor for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder queueName(String queueName) {
        serviceBusReceiverClientBuilder.queueName(queueName);
        return this;
    }

    /**
     * Sets the receive mode for the processor.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        serviceBusReceiverClientBuilder.receiveMode(receiveMode);
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide
     * a secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
        serviceBusReceiverClientBuilder.subQueue(subQueue);
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>{@code topicName} must also be set.</b>
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
        serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
        return this;
    }

    /**
     * Sets the name of the topic. <b>{@code subscriptionName} must also be set.</b>
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder topicName(String topicName) {
        serviceBusReceiverClientBuilder.topicName(topicName);
        return this;
    }

    /**
     * The message processing callback for the processor which will be executed when a message is received.
     *
     * @param processMessage The message processing consumer that will be executed when a message is received.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder processMessage(
        Consumer<ServiceBusReceivedMessageContext> processMessage) {
        this.processMessage = processMessage;
        return this;
    }

    /**
     * The error handler for the processor which will be invoked in the event of an error while receiving
     * messages.
     *
     * @param processError The error handler which will be executed when an error occurs.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
        this.processError = processError;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. In RECEIVE_AND_DELETE mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
        return this;
    }

    /**
     * Max concurrent messages that this processor should process. By default, this is set to 1.
     *
     * @param maxConcurrentCalls max concurrent messages that this processor should process.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
     */
    public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
        if (maxConcurrentCalls < 1) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
        }
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed, and a message that fails processing is abandoned.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder disableAutoComplete() {
        // Both the underlying receiver and the processor options must agree on this setting.
        serviceBusReceiverClientBuilder.disableAutoComplete();
        processorClientOptions.setDisableAutoComplete(true);
        return this;
    }

    /**
     * Creates a Service Bus message processor responsible for reading
     * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not
     * set.
     */
    public ServiceBusProcessorClient buildProcessorClient() {
        return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
            serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
            serviceBusReceiverClientBuilder.subscriptionName,
            Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
            Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
    }
}
/**
 * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
 * messages from Service Bus.
 *
 * @see ServiceBusReceiverAsyncClient
 * @see ServiceBusReceiverClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusReceiverClientBuilder {
    // Auto-complete is on by default; disableAutoComplete() turns it off.
    private boolean enableAutoComplete = true;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    // Unlike the session builder, this defaults to null rather than SubQueue.NONE — TODO confirm intended.
    private SubQueue subQueue;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

    private ServiceBusReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed, and a message that fails processing is abandoned.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. In RECEIVE_AND_DELETE mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     * @return The updated {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver. Prefetch speeds up the message flow by aiming to have a
     * message readily available for local retrieval before the application asks for one. Setting a non-zero
     * value will prefetch that number of messages; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to.
     *
     * @param subQueue The type of the sub queue.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>{@code topicName} must also be set.</b>
     *
     * @param subscriptionName Name of the subscription.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic. <b>{@code subscriptionName} must also be set.</b>
     *
     * @param topicName Name of the topic.
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates a <b>synchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of queue name and topic name are set.
     * @throws IllegalArgumentException if queue or topic name are not set.
     */
    public ServiceBusReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusReceiverClient(buildAsyncClient(false),
            isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    // Shared construction path for the sync and async receivers. isAutoCompleteAllowed is false when building
    // for the synchronous client, where auto-complete is only supported through callback receive.
    ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);
        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        // Lock renewal is meaningless in RECEIVE_AND_DELETE mode since there is no lock to renew.
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete);
        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
            entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
 * Validates that a prefetch count is non-negative, logging and throwing if it is not.
 *
 * @param prefetchCount The prefetch count to validate.
 * @throws IllegalArgumentException if {@code prefetchCount} is negative.
 */
private void validateAndThrow(int prefetchCount) {
    if (prefetchCount >= 0) {
        return;
    }
    final String message = String.format("prefetchCount (%s) cannot be less than 0.", prefetchCount);
    throw LOGGER.logExceptionAsError(new IllegalArgumentException(message));
}
/**
 * Validates that a lock renewal duration is not negative, logging and throwing if it is.
 * A {@code null} duration is accepted (it means auto-renewal is disabled).
 *
 * @param maxLockRenewalDuration The duration to validate; may be {@code null}.
 * @throws IllegalArgumentException if {@code maxLockRenewalDuration} is negative.
 */
private void validateAndThrow(Duration maxLockRenewalDuration) {
    final boolean isNegative = maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative();
    if (isNegative) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    }
}
} |
For Event Hubs, it seems we support setting a custom endpoint at the Processor level as well (in addition to EventHubsClientBuilder) - https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/eventhubs/azure-messaging-eventhubs/CHANGELOG.md#550-2020-02-15. Should we check whether the same applies to the Service Bus processor as well? | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
}
return this;
} | if (customEndpointAddress == null) { | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
}
return this;
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
private static final AmqpRetryOptions DEFAULT_RETRY =
new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
private static final int DEFAULT_PREFETCH_COUNT = 0;
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
private ClientOptions clientOptions;
private Configuration configuration;
private ServiceBusConnectionProcessor sharedConnection;
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
* Keeps track of the open clients that were created from this builder when there is a shared connection.
*/
private final AtomicInteger openClients = new AtomicInteger();
/**
* Creates a new instance with the default transport {@link AmqpTransportType
*/
public ServiceBusClientBuilder() {
// No required state; all configuration is supplied through the fluent setters.
}
/**
* Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
* certain properties, as well as support the addition of custom header information. Refer to the {@link
* ClientOptions} documentation for more information.
*
* @param clientOptions to be set on the client.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
// May be null; getConnectionOptions() substitutes a default ClientOptions when unset.
this.clientOptions = clientOptions;
return this;
}
/**
 * Sets the fully-qualified namespace for the Service Bus.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} is {@code null}.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after validation so a rejected value cannot leave the builder partially configured.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    return this;
}
// Returns the configured fully-qualified namespace, rejecting a missing or empty value.
private String getAndValidateFullyQualifiedNamespace() {
    final String namespace = fullyQualifiedNamespace;
    if (CoreUtils.isNullOrEmpty(namespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    return namespace;
}
/**
* Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
* does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
* an intermediary. For example: {@literal https:
* <p>
* If no port is specified, the default port for the configured {@link
* AmqpTransportType transport type} is used.
*
* @param customEndpointAddress The custom endpoint address.
* @return The updated {@link ServiceBusClientBuilder} object.
* @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
*/
/**
* Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
*
* @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder connectionString(String connectionString) {
final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
final TokenCredential tokenCredential;
try {
// Builds a shared-key or SAS credential from the connection string's key material.
tokenCredential = getTokenCredential(properties);
} catch (Exception e) {
throw LOGGER.logExceptionAsError(
new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
}
this.fullyQualifiedNamespace = properties.getEndpoint().getHost();
String entityPath = properties.getEntityPath();
if (!CoreUtils.isNullOrEmpty(entityPath)) {
LOGGER.atInfo()
.addKeyValue(ENTITY_PATH_KEY, entityPath)
.log("Setting entity from connection string.");
// Remember the entity scoped by the connection string; later checked against queueName/topicName.
this.connectionStringEntityName = entityPath;
}
return credential(properties.getEndpoint().getHost(), tokenCredential);
}
/**
* Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
* scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
* one 'send-via' entity on server side as explained next.
* Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
* entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
* perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
* their first operation need to either be senders, or if they are receivers they need to be on the same entity as
* the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
* that the transaction is committed because it cannot route a receive operation through a different entity). For
* instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
* cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
* first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
*
* <p><strong>Avoid using non-transaction API on this client</strong></p>
* Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
* have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
* entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
*
* <p><strong>When not to enable this feature</strong></p>
* If your transaction is involved in one Service bus entity only. For example you are receiving from one
* queue/subscription and you want to settle your own messages which are part of one transaction.
*
* @return The updated {@link ServiceBusSenderClientBuilder} object.
*
* @see <a href="https:
*/
public ServiceBusClientBuilder enableCrossEntityTransactions() {
// Consumed when the shared AMQP connection is created in getOrCreateConnectionProcessor().
this.crossEntityTransactions = true;
return this;
}
// Chooses between a SAS-based credential and a shared-key credential depending on which
// secret the parsed connection string supplies.
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
    if (properties.getSharedAccessSignature() != null) {
        return new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
    }
    return new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
        properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY);
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* If not specified, the default configuration store is used to configure Service Bus clients. Use {@link
* Configuration
*
* @param configuration The configuration store used to configure Service Bus clients.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
// May be null; getConnectionOptions() falls back to a clone of the global configuration.
this.configuration = configuration;
return this;
}
/**
 * Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
 * The azure-identity library has multiple {@link TokenCredential} implementations that can be used to
 * authenticate the access to the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is {@code null}.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after all validation so a failed call leaves the builder unchanged.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = credential;
    return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential The token credential to use for authentication. Access controls may be specified by the
* ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
// Namespace must be supplied separately, e.g. via fullyQualifiedNamespace(String).
this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'. Updating the {@link AzureNamedKeyCredential} allows
 * you to update the name and key.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is {@code null}.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after validation; the named key (policy name + key) is adapted to the
    // internal shared-key credential implementation.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
* Sets the credential with the shared access policies for the Service Bus resource.
* You can find the shared access policies on the azure portal or Azure CLI.
* For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
* The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
* can be either 'Primary Key' or 'Secondary Key'.
* This method and {@link
* you to update the name and key.
*
* @param credential {@link AzureNamedKeyCredential} to be used for authentication.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
// Adapt the named key (policy name + key) to the internal shared-key credential implementation.
this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to the Service Bus documentation on access control with Shared Access Signatures.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is {@code null}.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after validation so a rejected call leaves the builder unchanged.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
* Sets the credential with Shared Access Signature for the Service Bus resource.
* Refer to <a href="https:
* Service Bus access control with Shared Access Signatures</a>.
*
* @param credential {@link AzureSasCredential} to be used for authentication.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
// Wrap the pre-computed SAS in the internal shared-key credential implementation.
this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
return this;
}
/**
* Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured, {@link
* AmqpTransportType
*
* @param proxyOptions The proxy configuration to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
// A configured proxy address requires AMQP_WEB_SOCKETS transport; enforced in getConnectionOptions().
this.proxyOptions = proxyOptions;
return this;
}
/**
* Package-private method that sets the verify mode for this connection.
*
* @param verifyMode The verification mode.
* @return The updated {@link ServiceBusClientBuilder} object.
*/
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
// Package-private; defaults to VERIFY_PEER_NAME when unset (see getConnectionOptions()).
this.verifyMode = verifyMode;
return this;
}
/**
* Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
*
* @param retryOptions The retry options to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
// When unset, DEFAULT_RETRY is applied at connection-creation time.
this.retryOptions = retryOptions;
return this;
}
/**
* Sets the scheduler to use.
*
* @param scheduler Scheduler to be used.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
// Package-private; when unset, an elastic scheduler is chosen at connection-creation time.
this.scheduler = scheduler;
return this;
}
/**
* Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is {@link
* AmqpTransportType
*
* @param transportType The transport type to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
// Default is AmqpTransportType.AMQP (see field initializer).
this.transport = transportType;
return this;
}
/**
* A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
*
* @return A new instance of {@link ServiceBusSenderClientBuilder}.
*/
public ServiceBusSenderClientBuilder sender() {
// Child builder; clients it builds share this builder's connection and settings.
return new ServiceBusSenderClientBuilder();
}
/**
* A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
*
* @return A new instance of {@link ServiceBusReceiverClientBuilder}.
*/
public ServiceBusReceiverClientBuilder receiver() {
// Child builder; clients it builds share this builder's connection and settings.
return new ServiceBusReceiverClientBuilder();
}
/**
* A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service
* Bus message receivers.
*
* @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
*/
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
// Child builder for session-aware receivers; shares this builder's connection and settings.
return new ServiceBusSessionReceiverClientBuilder();
}
/**
* A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient}
* instance.
*
* @return A new instance of {@link ServiceBusProcessorClientBuilder}.
*/
public ServiceBusProcessorClientBuilder processor() {
// Child builder; clients it builds share this builder's connection and settings.
return new ServiceBusProcessorClientBuilder();
}
/**
* A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
* instance that processes sessions.
* @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
*/
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
// Child builder for session-aware processors; shares this builder's connection and settings.
return new ServiceBusSessionProcessorClientBuilder();
}
/**
* Called when a child client is closed. Disposes of the shared connection if there are no more clients.
*/
void onClientClose() {
synchronized (connectionLock) {
// Reference-count decrement; pairs with the increment in getOrCreateConnectionProcessor().
final int numberOfOpenClients = openClients.decrementAndGet();
LOGGER.atInfo()
.addKeyValue("numberOfOpenClients", numberOfOpenClients)
.log("Closing a dependent client.");
if (numberOfOpenClients > 0) {
return;
}
// Going below zero means close was called more times than clients were created.
if (numberOfOpenClients < 0) {
LOGGER.atWarning()
.addKeyValue("numberOfOpenClients", numberOfOpenClients)
.log("There should not be less than 0 clients.");
}
LOGGER.info("No more open clients, closing shared connection.");
// Last client closed: dispose the shared connection so it can be lazily recreated later.
if (sharedConnection != null) {
sharedConnection.dispose();
sharedConnection = null;
} else {
LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
}
}
}
// Lazily creates the shared AMQP connection (guarded by connectionLock) and increments the
// open-client reference count. Each call is balanced by onClientClose() from the created client.
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
if (retryOptions == null) {
retryOptions = DEFAULT_RETRY;
}
if (scheduler == null) {
scheduler = Schedulers.elastic();
}
synchronized (connectionLock) {
if (sharedConnection == null) {
final ConnectionOptions connectionOptions = getConnectionOptions();
// repeat() keeps supplying fresh connections so the processor can recover after a close.
final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
final String connectionId = StringUtil.getRandomString("MF");
final ReactorProvider provider = new ReactorProvider();
final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
connectionOptions.getAuthorizationScope());
return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
crossEntityTransactions);
}).repeat();
sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
}
}
final int numberOfOpenClients = openClients.incrementAndGet();
// NOTE(review): the log statement below appears truncated in this copy of the file — confirm
// against the original source before building.
LOGGER.info("
return sharedConnection;
}
// Assembles the ConnectionOptions used to open the AMQP connection, applying defaults for
// configuration/proxy/verify mode and validating credential and transport/proxy compatibility.
private ConnectionOptions getConnectionOptions() {
configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
if (credentials == null) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
+ "They can be set using: connectionString(String), connectionString(String, String), "
+ "or credentials(String, String, TokenCredential)"
));
}
// An explicitly configured proxy address only works over web sockets.
if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
&& transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"Cannot use a proxy when TransportType is not AMQP."));
}
if (proxyOptions == null) {
proxyOptions = getDefaultProxyConfiguration(configuration);
}
// Shared-key credentials authorize via SAS; everything else uses AAD JSON web tokens.
final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
: CbsAuthorizationType.JSON_WEB_TOKEN;
final SslDomain.VerifyMode verificationMode = verifyMode != null
? verifyMode
: SslDomain.VerifyMode.VERIFY_PEER_NAME;
final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
// Product name/version reported on the connection, read from the packaged properties file.
final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
if (customEndpointAddress == null) {
return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
options, verificationMode, product, clientVersion);
} else {
// Route the connection through the user-supplied endpoint instead of the namespace host.
return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
customEndpointAddress.getPort());
}
}
// Resolves proxy settings from configuration when none were supplied explicitly. Returns the
// system defaults unless an HTTP proxy address is present in the configuration.
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
    final ProxyAuthenticationType authentication = proxyOptions == null
        ? ProxyAuthenticationType.NONE
        : proxyOptions.getAuthentication();
    final String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
    if (CoreUtils.isNullOrEmpty(proxyAddress)) {
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
    final boolean useSystemProxies = Boolean.parseBoolean(configuration.get("java.net.useSystemProxies"));
    return getProxyOptions(authentication, proxyAddress, configuration, useSystemProxies);
}
// Parses an explicit "host:port" proxy address into ProxyOptions; otherwise falls back to the
// platform proxy configuration when 'java.net.useSystemProxies' is enabled.
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
Configuration configuration, boolean useSystemProxies) {
String host;
int port;
if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
final String[] hostPort = proxyAddress.split(":");
host = hostPort[0];
port = Integer.parseInt(hostPort[1]);
final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
return new ProxyOptions(authentication, proxy, username, password);
} else if (useSystemProxies) {
// Delegate parsing of system-proxy style configuration to azure-core.
com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
.fromConfiguration(configuration);
return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
} else {
LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
+ "set or was false.");
return ProxyOptions.SYSTEM_DEFAULTS;
}
}
// True when the given string is null or has no characters.
private static boolean isNullOrEmpty(String item) {
    if (item == null) {
        return true;
    }
    return item.isEmpty();
}
// Determines the messaging entity type implied by the configured names, enforcing that at most
// one of queueName/topicName is used and that it matches any entity named in the connection string.
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
    String topicName, String queueName) {
    final boolean hasTopicName = !isNullOrEmpty(topicName);
    final boolean hasQueueName = !isNullOrEmpty(queueName);
    final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);
    if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
            "Cannot build client without setting either a queueName or topicName."));
    }
    if (hasQueueName && hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
    }
    if (hasQueueName) {
        if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "queueName (%s) is different than the connectionString's EntityPath (%s).",
                queueName, connectionStringEntityName)));
        }
        return MessagingEntityType.QUEUE;
    }
    if (hasTopicName) {
        if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) is different than the connectionString's EntityPath (%s).",
                topicName, connectionStringEntityName)));
        }
        return MessagingEntityType.SUBSCRIPTION;
    }
    // Only the connection string names an entity; its concrete type is not known here.
    return MessagingEntityType.UNKNOWN;
}
// Builds the AMQP entity path for the given entity type, appending the appropriate
// dead-letter suffix when a sub-queue is requested.
private static String getEntityPath(MessagingEntityType entityType, String queueName,
    String topicName, String subscriptionName, SubQueue subQueue) {
    final String basePath;
    if (entityType == MessagingEntityType.QUEUE) {
        basePath = queueName;
    } else if (entityType == MessagingEntityType.SUBSCRIPTION) {
        if (isNullOrEmpty(subscriptionName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) must have a subscriptionName associated with it.", topicName)));
        }
        basePath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
            subscriptionName);
    } else {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
            new IllegalArgumentException("Unknown entity type: " + entityType));
    }
    if (subQueue == null || subQueue == SubQueue.NONE) {
        return basePath;
    }
    if (subQueue == SubQueue.TRANSFER_DEAD_LETTER_QUEUE) {
        return basePath + TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
    }
    if (subQueue == SubQueue.DEAD_LETTER_QUEUE) {
        return basePath + DEAD_LETTER_QUEUE_NAME_SUFFIX;
    }
    throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: "
        + subQueue));
}
/**
 * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
 * to Service Bus.
 *
 * @see ServiceBusSenderAsyncClient
 * @see ServiceBusSenderClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
// Exactly one of queueName/topicName may be set; validated in buildAsyncClient().
private String queueName;
private String topicName;
private ServiceBusSenderClientBuilder() {
}
/**
 * Sets the name of the Service Bus queue to publish messages to.
 *
 * @param queueName Name of the queue.
 *
 * @return The modified {@link ServiceBusSenderClientBuilder} object.
 */
public ServiceBusSenderClientBuilder queueName(String queueName) {
this.queueName = queueName;
return this;
}
/**
 * Sets the name of the Service Bus topic to publish messages to.
 *
 * @param topicName Name of the topic.
 *
 * @return The modified {@link ServiceBusSenderClientBuilder} object.
 */
public ServiceBusSenderClientBuilder topicName(String topicName) {
this.topicName = topicName;
return this;
}
/**
 * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
 * ServiceBusMessage} to a Service Bus queue or topic.
 *
 * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
 * @throws IllegalStateException if neither or both of {@code queueName} and {@code topicName} are set, or if
 * the set name disagrees with the entity named in the connection string.
 * @throws IllegalArgumentException if the entity type is not a queue or a topic.
 */
public ServiceBusSenderAsyncClient buildAsyncClient() {
// Acquires (or creates) the shared connection; released via onClientClose when the client closes.
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
queueName);
final String entityName;
switch (entityType) {
case QUEUE:
entityName = queueName;
break;
case SUBSCRIPTION:
entityName = topicName;
break;
case UNKNOWN:
// Neither queueName nor topicName set; fall back to the connection string's entity.
entityName = connectionStringEntityName;
break;
default:
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("Unknown entity type: " + entityType));
}
return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
}
/**
 * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
 * to a Service Bus queue or topic.
 *
 * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
 * @throws IllegalStateException if neither or both of {@code queueName} and {@code topicName} are set, or if
 * the set name disagrees with the entity named in the connection string.
 * @throws IllegalArgumentException if the entity type is not a queue or a topic.
 */
public ServiceBusSenderClient buildClient() {
return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
}
}
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
* entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link
* and {@link
* next session to process.
*
* <p>
* By default, the processor:
* <ul>
* <li>Automatically settles messages. Disabled via {@link
* <li>Processes 1 session concurrently. Configured via {@link
* <li>Invokes 1 instance of {@link
* {@link
* </ul>
*
* <p><strong>Instantiate a session-enabled processor client</strong></p>
* <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient
* <pre>
* Consumer<ServiceBusReceivedMessageContext> onMessage = context -> &
* ServiceBusReceivedMessage message = context.getMessage&
* System.out.printf&
* message.getSessionId&
* &
*
* Consumer<ServiceBusErrorContext> onError = context -> &
* System.out.printf&
* context.getFullyQualifiedNamespace&
*
* if &
* ServiceBusException exception = &
* System.out.printf&
* exception.getReason&
* &
* System.out.printf&
* &
* &
*
* &
*
* ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder&
* .connectionString&
* .sessionProcessor&
* .queueName&
* .maxConcurrentSessions&
* .processMessage&
* .processError&
* .buildProcessorClient&
*
* &
* sessionProcessor.start&
* </pre>
* <!-- end com.azure.messaging.servicebus.servicebusprocessorclient
*
* @see ServiceBusProcessorClient
*/
public final class ServiceBusSessionProcessorClientBuilder {
private final ServiceBusProcessorClientOptions processorClientOptions;
private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
// User-supplied callbacks invoked for each received message and for processing errors.
private Consumer<ServiceBusReceivedMessageContext> processMessage;
private Consumer<ServiceBusErrorContext> processError;
private ServiceBusSessionProcessorClientBuilder() {
sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
// Defaults: one concurrent call and one concurrent session; both can be raised via setters.
processorClientOptions = new ServiceBusProcessorClientOptions()
.setMaxConcurrentCalls(1)
.setTracerProvider(tracerProvider);
sessionReceiverClientBuilder.maxConcurrentSessions(1);
}
/**
* Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration
* disables auto-renewal. For {@link ServiceBusReceiveMode
* auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration
* or {@code null} indicates that auto-renewal is disabled.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
// Rejects negative durations; per the javadoc, Duration.ZERO or null disables auto-renewal.
validateAndThrow(maxAutoLockRenewDuration);
sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
return this;
}
/**
 * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
 *
 * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
 */
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
    if (maxConcurrentSessions <= 0) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
    }
    sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
    return this;
}
/**
* Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application starts the processor.
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
* Using a non-zero prefetch risks of losing messages even though it has better performance.
* @see <a href="https:
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
// Delegates to the underlying session receiver builder; per the javadoc, 0 disables prefetch.
sessionReceiverClientBuilder.prefetchCount(prefetchCount);
return this;
}
/**
* Sets the name of the queue to create a processor for.
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
// Mutually exclusive with topicName/subscriptionName; validated when the client is built.
sessionReceiverClientBuilder.queueName(queueName);
return this;
}
        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            sessionReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }
        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
            this.sessionReceiverClientBuilder.subQueue(subQueue);
            return this;
        }
        /**
         * Sets the name of the subscription in the topic to listen to. <b>A topic name must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
            sessionReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }
        /**
         * Sets the name of the topic. <b>A subscription name must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
            sessionReceiverClientBuilder.topicName(topicName);
            return this;
        }
        /**
         * The message processing callback for the processor that will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is received.
         *
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }
        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving
         * messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         *
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder processError(
            Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }
/**
* Max concurrent messages that this processor should process.
*
* @param maxConcurrentCalls max concurrent messages that this processor should process.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
if (maxConcurrentCalls < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
return this;
}
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
         * message is completed; if an error happens while the message is processed, it is abandoned.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
            // Both the underlying receiver and the processor options must agree on this setting.
            sessionReceiverClientBuilder.disableAutoComplete();
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }
/**
* Creates a <b>session-aware</b> Service Bus processor responsible for reading
* {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
* @throws NullPointerException if the {@link
* callbacks are not set.
*/
public ServiceBusProcessorClient buildProcessorClient() {
return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
sessionReceiverClientBuilder.subscriptionName,
Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
}
}
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from a <b>session aware</b> Service Bus entity.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusSessionReceiverClientBuilder {
private boolean enableAutoComplete = true;
private Integer maxConcurrentSessions = null;
private int prefetchCount = DEFAULT_PREFETCH_COUNT;
private String queueName;
private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
private String subscriptionName;
private String topicName;
private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
private SubQueue subQueue = SubQueue.NONE;
private ServiceBusSessionReceiverClientBuilder() {
}
/**
* Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
* {@link ServiceBusReceiverAsyncClient
* the message is processed, it is {@link ServiceBusReceiverAsyncClient
* abandoned}.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
this.enableAutoComplete = false;
return this;
}
/**
* Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration
* {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode
* mode, auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
* {@link Duration
*
* @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"maxConcurrentSessions cannot be less than 1."));
}
this.maxConcurrentSessions = maxConcurrentSessions;
return this;
}
/**
* Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application asks for one using {@link ServiceBusReceiverAsyncClient
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch
* off.
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException If {code prefetchCount} is negative.
*/
public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
validateAndThrow(prefetchCount);
this.prefetchCount = prefetchCount;
return this;
}
/**
* Sets the name of the queue to create a receiver for.
*
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
this.queueName = queueName;
return this;
}
/**
* Sets the receive mode for the receiver.
*
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
this.receiveMode = receiveMode;
return this;
}
/**
* Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
* secondary sub-queue, called a dead-letter queue (DLQ).
*
* @param subQueue The type of the sub queue.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @see
* @see SubQueue
*/
public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
this.subQueue = subQueue;
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
*
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @see
*/
public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
this.subscriptionName = subscriptionName;
return this;
}
/**
* Sets the name of the topic. <b>{@link
*
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @see
*/
public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
this.topicName = topicName;
return this;
}
/**
* Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
* ServiceBusMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
queueName);
final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
subQueue);
if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
enableAutoComplete = false;
}
if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
maxAutoLockRenewDuration = Duration.ZERO;
}
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
maxAutoLockRenewDuration, enableAutoComplete, null,
maxConcurrentSessions);
final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
}
/**
* Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
* ServiceBusMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
* subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
return buildAsyncClient(true);
}
/**
* Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
* ServiceBusMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
public ServiceBusSessionReceiverClient buildClient() {
final boolean isPrefetchDisabled = prefetchCount == 0;
return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
isPrefetchDisabled,
MessageUtils.getTotalTimeout(retryOptions));
}
private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
queueName);
final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
SubQueue.NONE);
if (!isAutoCompleteAllowed && enableAutoComplete) {
LOGGER.warning(
"'enableAutoComplete' is not supported in synchronous client except through callback receive.");
enableAutoComplete = false;
} else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
enableAutoComplete = false;
}
if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
maxAutoLockRenewDuration = Duration.ZERO;
}
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
ServiceBusClientBuilder.this::onClientClose);
}
}
    /**
     * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
     * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that invokes
     * the message processing callback when a message is received or the error handler when an error is observed.
     * To create an instance, configure the two callbacks via {@code processMessage(Consumer)} and
     * {@code processError(Consumer)}. By default, the processor is created with auto-completion and auto-lock
     * renewal capabilities.
     *
     * <p>Typical usage: configure a connection string, the queue (or topic/subscription) name, and both
     * callbacks, call {@code buildProcessorClient()}, then {@code start()} the returned processor.</p>
     *
     * @see ServiceBusProcessorClient
     */
    public final class ServiceBusProcessorClientBuilder {
        // Receiver builder that the processor drives internally; entity settings are delegated to it.
        private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
        // Processor-specific knobs (concurrency, auto-complete) passed to the built client.
        private final ServiceBusProcessorClientOptions processorClientOptions;
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;

        private ServiceBusProcessorClientBuilder() {
            serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
            // Defaults: one concurrent call, sharing the outer builder's tracer.
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
        }

        /**
         * Sets the prefetch count of the processor. Applies to both {@link ServiceBusReceiveMode#PEEK_LOCK} and
         * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} modes.
         *
         * <p>Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval
         * when and before the application starts the processor. Setting a non-zero value will prefetch that number
         * of messages. Setting the value to zero turns prefetch off.</p>
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
            serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }

        /**
         * Sets the name of the queue to create a processor for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder queueName(String queueName) {
            serviceBusReceiverClientBuilder.queueName(queueName);
            return this;
        }

        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            serviceBusReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide
         * a secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
            serviceBusReceiverClientBuilder.subQueue(subQueue);
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to. <b>A topic name must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
            serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }

        /**
         * Sets the name of the topic. <b>A subscription name must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder topicName(String topicName) {
            serviceBusReceiverClientBuilder.topicName(topicName);
            return this;
        }

        /**
         * The message processing callback for the processor which will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is received.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }

        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving
         * messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }

        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
         * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode, auto-renewal is
         * disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         *     {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }

        /**
         * Max concurrent messages that this processor should process. By default, this is set to 1.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
         */
        public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }

        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
         * message is completed; if an error happens while the message is processed, it is abandoned.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder disableAutoComplete() {
            // Both the underlying receiver and the processor options must agree on this setting.
            serviceBusReceiverClientBuilder.disableAutoComplete();
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }

        /**
         * Creates Service Bus message processor responsible for reading {@link ServiceBusReceivedMessage
         * messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
         * @throws IllegalStateException if the queue name or topic name are not set, or both of these fields are
         *     set. It is also thrown if the Service Bus connection information is not set.
         * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
         *     {@code topicName()}.
         * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not
         *     set.
         */
        public ServiceBusProcessorClient buildProcessorClient() {
            return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
                serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
                serviceBusReceiverClientBuilder.subscriptionName,
                Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
                Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
        }
    }
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from Service Bus.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusReceiverClientBuilder {
private boolean enableAutoComplete = true;
private int prefetchCount = DEFAULT_PREFETCH_COUNT;
private String queueName;
private SubQueue subQueue;
private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
private String subscriptionName;
private String topicName;
private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
private ServiceBusReceiverClientBuilder() {
}
/**
* Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
* {@link ServiceBusReceiverAsyncClient
* the message is processed, it is {@link ServiceBusReceiverAsyncClient
* abandoned}.
*
* @return The modified {@link ServiceBusReceiverClientBuilder} object.
*/
public ServiceBusReceiverClientBuilder disableAutoComplete() {
this.enableAutoComplete = false;
return this;
}
/**
* Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration
* disables auto-renewal. For {@link ServiceBusReceiveMode
* auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration
* or {@code null} indicates that auto-renewal is disabled.
*
* @return The updated {@link ServiceBusReceiverClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
return this;
}
/**
* Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application asks for one using {@link ServiceBusReceiverAsyncClient
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch
* off.
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusReceiverClientBuilder} object.
* @throws IllegalArgumentException If {code prefetchCount} is negative.
*/
public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
validateAndThrow(prefetchCount);
this.prefetchCount = prefetchCount;
return this;
}
/**
* Sets the name of the queue to create a receiver for.
*
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusReceiverClientBuilder} object.
*/
public ServiceBusReceiverClientBuilder queueName(String queueName) {
this.queueName = queueName;
return this;
}
/**
* Sets the receive mode for the receiver.
*
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusReceiverClientBuilder} object.
*/
public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
this.receiveMode = receiveMode;
return this;
}
/**
* Sets the type of the {@link SubQueue} to connect to.
*
* @param subQueue The type of the sub queue.
*
* @return The modified {@link ServiceBusReceiverClientBuilder} object.
* @see
*/
public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
this.subQueue = subQueue;
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
*
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusReceiverClientBuilder} object.
* @see
*/
public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
this.subscriptionName = subscriptionName;
return this;
}
/**
* Sets the name of the topic. <b>{@link
*
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusReceiverClientBuilder} object.
* @see
*/
public ServiceBusReceiverClientBuilder topicName(String topicName) {
this.topicName = topicName;
return this;
}
/**
* Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage
* messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
public ServiceBusReceiverAsyncClient buildAsyncClient() {
return buildAsyncClient(true);
}
/**
* Creates <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage messages}
* from a specific queue or subscription.
*
* @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
public ServiceBusReceiverClient buildClient() {
final boolean isPrefetchDisabled = prefetchCount == 0;
return new ServiceBusReceiverClient(buildAsyncClient(false),
isPrefetchDisabled,
MessageUtils.getTotalTimeout(retryOptions));
}
ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
queueName);
final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
subQueue);
if (!isAutoCompleteAllowed && enableAutoComplete) {
LOGGER.warning(
"'enableAutoComplete' is not supported in synchronous client except through callback receive.");
enableAutoComplete = false;
} else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
enableAutoComplete = false;
}
if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
maxAutoLockRenewDuration = Duration.ZERO;
}
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
maxAutoLockRenewDuration, enableAutoComplete);
return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
}
}
private void validateAndThrow(int prefetchCount) {
if (prefetchCount < 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"prefetchCount (%s) cannot be less than 0.", prefetchCount)));
}
}
private void validateAndThrow(Duration maxLockRenewalDuration) {
if (maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"'maxLockRenewalDuration' cannot be negative."));
}
}
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
// Retry options applied when the application does not configure any: default retry with the
// standard Service Bus operation timeout as the per-try timeout.
private static final AmqpRetryOptions DEFAULT_RETRY =
    new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
// Properties file packaged with the client; carries the artifact name/version reported in telemetry.
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
// Entity path format for a topic subscription: "{topic}/subscriptions/{subscription}".
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
// Suffixes appended to an entity path to address its dead-letter sub-queues.
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
// Prefetch is off by default; receivers fetch messages on demand.
private static final int DEFAULT_PREFETCH_COUNT = 0;
// Keys looked up in SERVICE_BUS_PROPERTIES_FILE, with the fallback value when absent.
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
// Matches a "host:port" proxy address, where the host portion contains no ':'.
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
// Guards creation and disposal of the shared connection below.
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
// Mutable builder state populated by the fluent setters.
private ClientOptions clientOptions;
private Configuration configuration;
// AMQP connection shared by every sender/receiver/processor created from this builder.
private ServiceBusConnectionProcessor sharedConnection;
// Entity name parsed from the connection string's EntityPath component, if one was present.
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
 * Keeps track of the open clients that were created from this builder when there is a shared connection.
 */
private final AtomicInteger openClients = new AtomicInteger();
/**
 * Creates a new instance with the default transport {@link AmqpTransportType#AMQP}.
 */
public ServiceBusClientBuilder() {
}
/**
 * Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
 * certain properties, as well as support the addition of custom header information. Refer to the {@link
 * ClientOptions} documentation for more information.
 *
 * @param clientOptions to be set on the client.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
    this.clientOptions = clientOptions;
    return this;
}
/**
 * Sets the fully-qualified namespace for the Service Bus.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    return this;
}
/**
 * Returns the configured namespace, re-validating it at build time in case it was never set
 * (the field is null/empty when neither a connection string nor a credential overload supplied it).
 *
 * @return The fully-qualified namespace.
 * @throws IllegalArgumentException if the namespace is null or empty.
 */
private String getAndValidateFullyQualifiedNamespace() {
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    return fullyQualifiedNamespace;
}
/**
* Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
* does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
* an intermediary. For example: {@literal https:
* <p>
* If no port is specified, the default port for the {@link
* used.
*
* @param customEndpointAddress The custom endpoint address.
* @return The updated {@link ServiceBusClientBuilder} object.
* @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
*/
/**
 * Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
 * When the connection string carries an {@code EntityPath}, that entity name is remembered and later
 * validated against any queue/topic name set on the child builders.
 *
 * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
public ServiceBusClientBuilder connectionString(String connectionString) {
    final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
    final TokenCredential sharedKeyCredential;
    try {
        sharedKeyCredential = getTokenCredential(properties);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
    }
    final String host = properties.getEndpoint().getHost();
    this.fullyQualifiedNamespace = host;
    final String entityPath = properties.getEntityPath();
    if (!CoreUtils.isNullOrEmpty(entityPath)) {
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Setting entity from connection string.");
        this.connectionStringEntityName = entityPath;
    }
    return credential(host, sharedKeyCredential);
}
/**
 * Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
 * scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
 * one 'send-via' entity on server side as explained next.
 * Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
 * entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
 * perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
 * their first operation need to either be senders, or if they are receivers they need to be on the same entity as
 * the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
 * that the transaction is committed because it cannot route a receive operation through a different entity). For
 * instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
 * cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
 * first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
 *
 * <p><strong>Avoid using non-transaction API on this client</strong></p>
 * Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
 * have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
 * entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
 *
 * <p><strong>When not to enable this feature</strong></p>
 * If your transaction is involved in one Service bus entity only. For example you are receiving from one
 * queue/subscription and you want to settle your own messages which are part of one transaction.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 *
 * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/service-bus-transactions">Service Bus
 *     transaction processing</a>
 */
public ServiceBusClientBuilder enableCrossEntityTransactions() {
    this.crossEntityTransactions = true;
    return this;
}
/**
 * Builds the shared-key credential backing a connection string: a SAS-signature credential when the
 * connection string embeds one, otherwise a key-name/key credential with the standard token validity.
 *
 * @param properties Parsed connection-string properties.
 * @return The {@link TokenCredential} derived from {@code properties}.
 */
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
    final String sharedAccessSignature = properties.getSharedAccessSignature();
    return sharedAccessSignature == null
        ? new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
            properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY)
        : new ServiceBusSharedKeyCredential(sharedAccessSignature);
}
/**
 * Sets the configuration store that is used during construction of the service client.
 *
 * If not specified, the default configuration store is used to configure Service Bus clients. Use {@link
 * Configuration#NONE} to bypass using configuration settings during construction.
 *
 * @param configuration The configuration store used to configure Service Bus clients.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}
/**
 * Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
 * The <a href="https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/identity/azure-identity">
 * azure-identity</a> library has multiple {@link TokenCredential} implementations that can be used to authenticate
 * the access to the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    return this;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * <a href="https://learn.microsoft.com/azure/developer/java/sdk/identity">identity and authentication</a>
 * documentation for more details on proper usage of the {@link TokenCredential} type.
 *
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
    this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link #connectionString(String)} take the same information in different forms, but this one
 * allows you to update the name and key.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link #connectionString(String)} take the same information in different forms, but this one
 * allows you to update the name and key.
 *
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to <a href="https://learn.microsoft.com/azure/service-bus-messaging/service-bus-sas">
 * Service Bus access control with Shared Access Signatures</a>.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to <a href="https://learn.microsoft.com/azure/service-bus-messaging/service-bus-sas">
 * Service Bus access control with Shared Access Signatures</a>.
 *
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured, {@link
 * AmqpTransportType#AMQP_WEB_SOCKETS} must be used for the transport type.
 *
 * @param proxyOptions The proxy configuration to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
    this.proxyOptions = proxyOptions;
    return this;
}
/**
 * Package-private method that sets the verify mode for this connection.
 *
 * @param verifyMode The verification mode.
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
    this.verifyMode = verifyMode;
    return this;
}
/**
 * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
 *
 * @param retryOptions The retry options to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}
/**
 * Package-private: sets the scheduler on which the shared connection pumps its work.
 *
 * @param scheduler Scheduler to be used.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
    this.scheduler = scheduler;
    return this;
}
/**
 * Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is {@link
 * AmqpTransportType#AMQP}.
 *
 * @param transportType The transport type to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
    this.transport = transportType;
    return this;
}
/**
 * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
 *
 * @return A new instance of {@link ServiceBusSenderClientBuilder}.
 */
public ServiceBusSenderClientBuilder sender() {
    return new ServiceBusSenderClientBuilder();
}
/**
 * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
 */
public ServiceBusReceiverClientBuilder receiver() {
    return new ServiceBusReceiverClientBuilder();
}
/**
 * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service
 * Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
 */
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
    return new ServiceBusSessionReceiverClientBuilder();
}
/**
 * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient}
 * instance.
 *
 * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
 */
public ServiceBusProcessorClientBuilder processor() {
    return new ServiceBusProcessorClientBuilder();
}
/**
 * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
 * instance that processes sessions.
 * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
 */
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
    return new ServiceBusSessionProcessorClientBuilder();
}
/**
 * Called when a child client is closed. Disposes of the shared connection once the last client is gone.
 * Synchronizes on {@code connectionLock} so the decrement and the dispose are atomic with respect to
 * {@code getOrCreateConnectionProcessor}.
 */
void onClientClose() {
    synchronized (connectionLock) {
        final int remaining = openClients.decrementAndGet();
        LOGGER.atInfo()
            .addKeyValue("numberOfOpenClients", remaining)
            .log("Closing a dependent client.");
        // Other clients still share the connection; nothing to tear down yet.
        if (remaining > 0) {
            return;
        }
        // A negative count indicates a double-close somewhere; log it but continue the teardown.
        if (remaining < 0) {
            LOGGER.atWarning()
                .addKeyValue("numberOfOpenClients", remaining)
                .log("There should not be less than 0 clients.");
        }
        LOGGER.info("No more open clients, closing shared connection.");
        if (sharedConnection == null) {
            LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
        } else {
            sharedConnection.dispose();
            sharedConnection = null;
        }
    }
}
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
if (retryOptions == null) {
retryOptions = DEFAULT_RETRY;
}
if (scheduler == null) {
scheduler = Schedulers.elastic();
}
synchronized (connectionLock) {
if (sharedConnection == null) {
final ConnectionOptions connectionOptions = getConnectionOptions();
final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
final String connectionId = StringUtil.getRandomString("MF");
final ReactorProvider provider = new ReactorProvider();
final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
connectionOptions.getAuthorizationScope());
return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
crossEntityTransactions);
}).repeat();
sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
}
}
final int numberOfOpenClients = openClients.incrementAndGet();
LOGGER.info("
return sharedConnection;
}
/**
 * Assembles the {@link ConnectionOptions} for the underlying AMQP connection from the current builder state,
 * filling in defaults (global configuration, system proxy, peer-name TLS verification, empty client options).
 *
 * @return Connection options carrying namespace, credential, transport, retry, proxy and client metadata.
 * @throws IllegalArgumentException if no credential was configured, if a proxy address is configured while the
 *     transport is not {@link AmqpTransportType#AMQP_WEB_SOCKETS}, or if the namespace is empty.
 */
private ConnectionOptions getConnectionOptions() {
    configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
    if (credentials == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
            + "They can be set using: connectionString(String), connectionString(String, String), "
            + "or credentials(String, String, TokenCredential)"
        ));
    }
    // Proxies are only tunneled over web sockets; plain AMQP cannot traverse an HTTP proxy.
    if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
        && transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
        // Fix: the message previously read "not AMQP", contradicting the guard above which
        // fires precisely when the transport is not AMQP Web Sockets.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "Cannot use a proxy when TransportType is not AMQP Web Sockets."));
    }
    if (proxyOptions == null) {
        proxyOptions = getDefaultProxyConfiguration(configuration);
    }
    // Shared-key credentials authorize via SAS tokens; all other credentials are AAD JSON web tokens.
    final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
        ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
        : CbsAuthorizationType.JSON_WEB_TOKEN;
    final SslDomain.VerifyMode verificationMode = verifyMode != null
        ? verifyMode
        : SslDomain.VerifyMode.VERIFY_PEER_NAME;
    final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
    // Artifact name/version reported to the service for client telemetry.
    final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
    final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
    final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
    if (customEndpointAddress == null) {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion);
    } else {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
            customEndpointAddress.getPort());
    }
}
/**
 * Derives proxy options from the environment when none were set explicitly: uses the HTTP_PROXY
 * configuration value when present, otherwise the platform's system defaults.
 *
 * @param configuration Configuration store to read proxy settings from.
 * @return Proxy options resolved from {@code configuration}.
 */
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
    // Preserve an explicitly chosen authentication scheme even when the address comes from config.
    final ProxyAuthenticationType authentication = proxyOptions == null
        ? ProxyAuthenticationType.NONE
        : proxyOptions.getAuthentication();
    final String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
    if (CoreUtils.isNullOrEmpty(proxyAddress)) {
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
    final boolean useSystemProxies = Boolean.parseBoolean(configuration.get("java.net.useSystemProxies"));
    return getProxyOptions(authentication, proxyAddress, configuration, useSystemProxies);
}
/**
 * Builds proxy options from a textual proxy address. A {@code host:port} address is parsed directly;
 * otherwise the JVM system proxies are consulted when enabled, and the platform defaults are used as a
 * last resort.
 *
 * @param authentication Authentication scheme to apply to the proxy.
 * @param proxyAddress Raw proxy address, expected as {@code host:port}.
 * @param configuration Configuration store supplying proxy username/password.
 * @param useSystemProxies Whether {@code java.net.useSystemProxies} was enabled.
 * @return Resolved {@link ProxyOptions}.
 */
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
    Configuration configuration, boolean useSystemProxies) {
    // Fix: match and split the SAME (trimmed) string. Previously the pattern was tested against
    // proxyAddress.trim() but the untrimmed value was split, so surrounding whitespace around an
    // otherwise valid "host:port" made Integer.parseInt throw NumberFormatException.
    final String trimmedAddress = proxyAddress.trim();
    if (HOST_PORT_PATTERN.matcher(trimmedAddress).find()) {
        final String[] hostPort = trimmedAddress.split(":");
        final String host = hostPort[0];
        final int port = Integer.parseInt(hostPort[1]);
        final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
        final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
        return new ProxyOptions(authentication, proxy, username, password);
    } else if (useSystemProxies) {
        // Adapt azure-core's ProxyOptions (resolved from system settings) to the AMQP ProxyOptions type.
        com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
            .fromConfiguration(configuration);
        return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
            coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
    } else {
        LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
            + "set or was false.");
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
}
/**
 * Null-safe emptiness check for strings.
 *
 * @param item String to inspect; may be {@code null}.
 * @return {@code true} when {@code item} is {@code null} or has zero length.
 */
private static boolean isNullOrEmpty(String item) {
    if (item == null) {
        return true;
    }
    return item.isEmpty();
}
/**
 * Validates that exactly one entity was identified (queue, topic, or connection-string EntityPath) and that
 * explicit names agree with the connection string's EntityPath.
 *
 * @param connectionStringEntityName EntityPath parsed from the connection string, possibly null/empty.
 * @param topicName Topic name set on the child builder, possibly null/empty.
 * @param queueName Queue name set on the child builder, possibly null/empty.
 * @return {@code QUEUE} or {@code SUBSCRIPTION} when an explicit name was given, {@code UNKNOWN} when only the
 *     connection string named the entity.
 * @throws IllegalStateException if no entity is named, both queue and topic are named, or an explicit name
 *     conflicts with the connection string's EntityPath.
 */
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
    String topicName, String queueName) {
    final boolean topicSet = !isNullOrEmpty(topicName);
    final boolean queueSet = !isNullOrEmpty(queueName);
    final boolean connectionStringEntitySet = !isNullOrEmpty(connectionStringEntityName);
    if (!connectionStringEntitySet && !queueSet && !topicSet) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
            "Cannot build client without setting either a queueName or topicName."));
    }
    if (queueSet && topicSet) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
    }
    if (queueSet) {
        if (connectionStringEntitySet && !queueName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "queueName (%s) is different than the connectionString's EntityPath (%s).",
                queueName, connectionStringEntityName)));
        }
        return MessagingEntityType.QUEUE;
    }
    if (topicSet) {
        if (connectionStringEntitySet && !topicName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) is different than the connectionString's EntityPath (%s).",
                topicName, connectionStringEntityName)));
        }
        return MessagingEntityType.SUBSCRIPTION;
    }
    // Only the connection string named the entity; its concrete type is not known here.
    return MessagingEntityType.UNKNOWN;
}
/**
 * Computes the AMQP entity path for the given entity, appending the dead-letter sub-queue suffix
 * when one is requested.
 *
 * @param entityType Whether the entity is a queue or a topic subscription.
 * @param queueName Queue name; used when {@code entityType} is {@code QUEUE}.
 * @param topicName Topic name; used when {@code entityType} is {@code SUBSCRIPTION}.
 * @param subscriptionName Subscription name; required for {@code SUBSCRIPTION}.
 * @param subQueue Optional sub-queue selector; {@code null} or {@code NONE} addresses the entity itself.
 * @return The full entity path.
 * @throws IllegalStateException if a subscription is requested without a subscription name.
 * @throws IllegalArgumentException for an unknown entity type or unsupported sub-queue value.
 */
private static String getEntityPath(MessagingEntityType entityType, String queueName,
    String topicName, String subscriptionName, SubQueue subQueue) {
    final String basePath;
    switch (entityType) {
        case QUEUE:
            basePath = queueName;
            break;
        case SUBSCRIPTION:
            if (isNullOrEmpty(subscriptionName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "topicName (%s) must have a subscriptionName associated with it.", topicName)));
            }
            basePath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
                subscriptionName);
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                new IllegalArgumentException("Unknown entity type: " + entityType));
    }
    if (subQueue == null || subQueue == SubQueue.NONE) {
        return basePath;
    }
    if (subQueue == SubQueue.DEAD_LETTER_QUEUE) {
        return basePath + DEAD_LETTER_QUEUE_NAME_SUFFIX;
    }
    if (subQueue == SubQueue.TRANSFER_DEAD_LETTER_QUEUE) {
        return basePath + TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
    }
    throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException(
        "Unsupported value of subqueue type: " + subQueue));
}
/**
 * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
 * to Service Bus.
 *
 * @see ServiceBusSenderAsyncClient
 * @see ServiceBusSenderClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
    // Exactly one of queueName/topicName (or the connection string's EntityPath) must identify the target.
    private String queueName;
    private String topicName;
    private ServiceBusSenderClientBuilder() {
    }
    /**
     * Sets the name of the Service Bus queue to publish messages to.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }
    /**
     * Sets the name of the Service Bus topic to publish messages to.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }
    /**
     * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
     * ServiceBusMessage} to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if neither {@code queueName} nor {@code topicName} is set, if both are set,
     *     or if the connection string's EntityPath conflicts with the configured name.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderAsyncClient buildAsyncClient() {
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityName;
        switch (entityType) {
            case QUEUE:
                entityName = queueName;
                break;
            case SUBSCRIPTION:
                entityName = topicName;
                break;
            case UNKNOWN:
                // Entity was named only by the connection string's EntityPath.
                entityName = connectionStringEntityName;
                break;
            default:
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Unknown entity type: " + entityType));
        }
        return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
    }
    /**
     * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
     * to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if neither {@code queueName} nor {@code topicName} is set, if both are set,
     *     or if the connection string's EntityPath conflicts with the configured name.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderClient buildClient() {
        return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
    }
}
/**
 * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
 * entity. {@link ServiceBusProcessorClient} processes messages and errors via {@code processMessage(Consumer)}
 * and {@code processError(Consumer)}. When a session ends, the processor rolls over to the
 * next session to process.
 *
 * <p>
 * By default, the processor:
 * <ul>
 * <li>Automatically settles messages.</li>
 * <li>Processes 1 session concurrently; configured via {@code maxConcurrentSessions(int)}.</li>
 * <li>Invokes 1 instance of the message handler at a time per session.</li>
 * </ul>
 *
 * <p>
 * NOTE(review): the code sample originally embedded here was corrupted during extraction and has been
 * replaced by this prose summary; see the {@link ServiceBusProcessorClient} class documentation for a
 * complete session-processor example.
 *
 * @see ServiceBusProcessorClient
 */
public final class ServiceBusSessionProcessorClientBuilder {
    // Processor-level options (concurrency, tracing); receiver options live on the nested builder.
    private final ServiceBusProcessorClientOptions processorClientOptions;
    private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
    private Consumer<ServiceBusReceivedMessageContext> processMessage;
    private Consumer<ServiceBusErrorContext> processError;
    private ServiceBusSessionProcessorClientBuilder() {
        sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
        // Defaults: one handler call at a time, one concurrent session.
        processorClientOptions = new ServiceBusProcessorClientOptions()
            .setMaxConcurrentCalls(1)
            .setTracerProvider(tracerProvider);
        sessionReceiverClientBuilder.maxConcurrentSessions(1);
    }
/**
* Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration
* disables auto-renewal. For {@link ServiceBusReceiveMode
* auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration
* or {@code null} indicates that auto-renewal is disabled.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
}
sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
return this;
}
/**
* Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application starts the processor.
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
* Using a non-zero prefetch risks of losing messages even though it has better performance.
* @see <a href="https:
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
sessionReceiverClientBuilder.prefetchCount(prefetchCount);
return this;
}
/**
* Sets the name of the queue to create a processor for.
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
sessionReceiverClientBuilder.queueName(queueName);
return this;
}
/**
 * Sets the receive mode for the processor.
 *
 * @param receiveMode Mode for receiving messages.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
    this.sessionReceiverClientBuilder.receiveMode(receiveMode);
    return this;
}
/**
 * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions
 * provide a secondary sub-queue, called a dead-letter queue (DLQ).
 *
 * @param subQueue The type of the sub queue.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 * @see SubQueue
 */
public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
    sessionReceiverClientBuilder.subQueue(subQueue);
    return this;
}
/**
 * Sets the name of the subscription in the topic to listen to.
 *
 * @param subscriptionName Name of the subscription.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
    this.sessionReceiverClientBuilder.subscriptionName(subscriptionName);
    return this;
}
/**
 * Sets the name of the topic.
 *
 * @param topicName Name of the topic.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
    this.sessionReceiverClientBuilder.topicName(topicName);
    return this;
}
/**
 * The message processing callback for the processor that will be executed when a message is received.
 *
 * @param processMessage The message processing consumer that will be executed when a message is received.
 *
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder processMessage(
    Consumer<ServiceBusReceivedMessageContext> processMessage) {
    // Nullability is enforced later, in buildProcessorClient().
    this.processMessage = processMessage;
    return this;
}
/**
 * The error handler for the processor which will be invoked in the event of an error while receiving messages.
 *
 * @param processError The error handler which will be executed when an error occurs.
 *
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder processError(
    Consumer<ServiceBusErrorContext> processError) {
    // Nullability is enforced later, in buildProcessorClient().
    this.processError = processError;
    return this;
}
/**
 * Max concurrent messages that this processor should process.
 *
 * @param maxConcurrentCalls max concurrent messages that this processor should process.
 *
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
 */
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
    if (maxConcurrentCalls >= 1) {
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }
    throw LOGGER.logExceptionAsError(
        new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
/**
 * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
 * message is completed; if an error happens while the message is processed, it is abandoned.
 *
 * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
 */
public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
    // The flag is recorded both on the processor options and on the underlying receiver builder;
    // the two statements are independent of each other.
    processorClientOptions.setDisableAutoComplete(true);
    sessionReceiverClientBuilder.disableAutoComplete();
    return this;
}
/**
 * Creates a <b>session-aware</b> Service Bus processor responsible for reading
 * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
 *
 * @return A new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
 * @throws IllegalStateException if neither or both of the queue name and topic name are set, or if the
 * configured entity conflicts with the connection string's entity path.
 * @throws IllegalArgumentException Queue or topic name are not set.
 * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not set.
 */
public ServiceBusProcessorClient buildProcessorClient() {
    // Validate the callbacks up front, in the same order the original argument list evaluated them.
    final Consumer<ServiceBusReceivedMessageContext> messageHandler =
        Objects.requireNonNull(processMessage, "'processMessage' cannot be null");
    final Consumer<ServiceBusErrorContext> errorHandler =
        Objects.requireNonNull(processError, "'processError' cannot be null");
    return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
        sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
        sessionReceiverClientBuilder.subscriptionName, messageHandler, errorHandler,
        processorClientOptions);
}
}
/**
 * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
 * messages from a <b>session aware</b> Service Bus entity.
 *
 * @see ServiceBusReceiverAsyncClient
 * @see ServiceBusReceiverClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusSessionReceiverClientBuilder {
    // Auto-complete/abandon is on by default and switched off via disableAutoComplete().
    private boolean enableAutoComplete = true;
    // null means no session roll-over; a positive value caps concurrently processed sessions.
    private Integer maxConcurrentSessions = null;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
    private SubQueue subQueue = SubQueue.NONE;

    // Only the enclosing ServiceBusClientBuilder creates instances.
    private ServiceBusSessionReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed; if an error happens while the message is processed, it is abandoned.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
     * auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
     * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     *
     * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "maxConcurrentSessions cannot be less than 1."));
        }
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver. Prefetch speeds up the message flow by aiming to have a
     * message readily available for local retrieval when and before the application asks for one.
     * A non-zero value prefetches that number of messages; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions
     * provide a secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription, for use by the processor.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of the queue name and topic name are set, or if the
     * configured entity conflicts with the connection string's entity path.
     * @throws IllegalArgumentException Queue or topic name are not set.
     */
    ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);
        // Auto-complete is meaningless in RECEIVE_AND_DELETE mode, so it is switched off with a warning.
        // NOTE(review): the warning text "in for" looks like a typo; runtime string left untouched here.
        if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        // No message locks exist in RECEIVE_AND_DELETE mode, so lock renewal is forced off.
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null,
            maxConcurrentSessions);
        // The session manager owns the per-session links and handles roll-over between sessions.
        final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
            connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
            entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if neither or both of the queue name and topic name are set, or if the
     * configured entity conflicts with the connection string's entity path.
     * @throws IllegalArgumentException Queue or topic name are not set.
     */
    public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
        // Async clients may auto-complete; the flag is only restricted for the synchronous client.
        return buildAsyncClient(true);
    }

    /**
     * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusSessionReceiverClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if neither or both of the queue name and topic name are set, or if the
     * configured entity conflicts with the connection string's entity path.
     * @throws IllegalArgumentException Queue or topic name are not set.
     */
    public ServiceBusSessionReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
            isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    // Shared construction path for the async and sync session receiver clients.
    // isAutoCompleteAllowed is false for the synchronous client, which cannot auto-complete.
    private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        // NOTE(review): unlike buildAsyncClientForProcessor, this always passes SubQueue.NONE and
        // ignores the configured 'subQueue' field — confirm this is intended.
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            SubQueue.NONE);
        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        // No message locks exist in RECEIVE_AND_DELETE mode, so lock renewal is forced off.
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
        return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
 * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
 * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provide a push-based mechanism that invokes
 * the message-processing callback when a message is received, or the error handler when an error is
 * observed. To create an instance, configure the two callbacks via {@code processMessage()} and
 * {@code processError()}. By default the processor is created with auto-completion and auto-lock renewal
 * capabilities, and a single concurrent call.
 *
 * @see ServiceBusProcessorClient
 */
public final class ServiceBusProcessorClientBuilder {
    // Underlying (non-session) receiver builder the processor is built on top of.
    private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
    private final ServiceBusProcessorClientOptions processorClientOptions;
    private Consumer<ServiceBusReceivedMessageContext> processMessage;
    private Consumer<ServiceBusErrorContext> processError;

    // Only the enclosing ServiceBusClientBuilder creates instances.
    private ServiceBusProcessorClientBuilder() {
        serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
        processorClientOptions = new ServiceBusProcessorClientOptions()
            .setMaxConcurrentCalls(1)
            .setTracerProvider(tracerProvider);
    }

    /**
     * Sets the prefetch count of the processor. Prefetch speeds up the message flow by aiming to have a
     * message readily available for local retrieval when and before the application starts the processor.
     * A non-zero value prefetches that number of messages; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
        serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
        return this;
    }

    /**
     * Sets the name of the queue to create a processor for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder queueName(String queueName) {
        serviceBusReceiverClientBuilder.queueName(queueName);
        return this;
    }

    /**
     * Sets the receive mode for the processor.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        serviceBusReceiverClientBuilder.receiveMode(receiveMode);
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions
     * provide a secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
        serviceBusReceiverClientBuilder.subQueue(subQueue);
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
        serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
        return this;
    }

    /**
     * Sets the name of the topic.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder topicName(String topicName) {
        serviceBusReceiverClientBuilder.topicName(topicName);
        return this;
    }

    /**
     * The message processing callback for the processor which will be executed when a message is received.
     *
     * @param processMessage The message processing consumer that will be executed when a message is received.
     *
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder processMessage(
        Consumer<ServiceBusReceivedMessageContext> processMessage) {
        // Nullability is enforced later, in buildProcessorClient().
        this.processMessage = processMessage;
        return this;
    }

    /**
     * The error handler for the processor which will be invoked in the event of an error while receiving
     * messages.
     *
     * @param processError The error handler which will be executed when an error occurs.
     *
     * @return The updated {@link ServiceBusProcessorClientBuilder} object
     */
    public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
        // Nullability is enforced later, in buildProcessorClient().
        this.processError = processError;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
     * auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     *
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
        return this;
    }

    /**
     * Max concurrent messages that this processor should process. By default, this is set to 1.
     *
     * @param maxConcurrentCalls max concurrent messages that this processor should process.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
     */
    public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
        if (maxConcurrentCalls < 1) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
        }
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed; if an error happens while the message is processed, it is abandoned.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder disableAutoComplete() {
        // The flag is recorded on both the underlying receiver builder and the processor options.
        serviceBusReceiverClientBuilder.disableAutoComplete();
        processorClientOptions.setDisableAutoComplete(true);
        return this;
    }

    /**
     * Creates a Service Bus message processor responsible for reading
     * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of the queue name and topic name are set, or if the
     * configured entity conflicts with the connection string's entity path.
     * @throws IllegalArgumentException Queue or topic name are not set.
     * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not
     * set.
     */
    public ServiceBusProcessorClient buildProcessorClient() {
        return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
            serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
            serviceBusReceiverClientBuilder.subscriptionName,
            Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
            Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
    }
}
/**
 * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
 * messages from Service Bus.
 *
 * @see ServiceBusReceiverAsyncClient
 * @see ServiceBusReceiverClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusReceiverClientBuilder {
    // Auto-complete/abandon is on by default and switched off via disableAutoComplete().
    private boolean enableAutoComplete = true;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    // Unlike the session receiver builder, this field has no default and may remain null.
    private SubQueue subQueue;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

    // Only the enclosing ServiceBusClientBuilder creates instances.
    private ServiceBusReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed
     * message is completed; if an error happens while the message is processed, it is abandoned.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
     * auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     *
     * @return The updated {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver. Prefetch speeds up the message flow by aiming to have a
     * message readily available for local retrieval when and before the application asks for one.
     * A non-zero value prefetches that number of messages; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to.
     *
     * @param subQueue The type of the sub queue.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if neither or both of the queue name and topic name are set, or if the
     * configured entity conflicts with the connection string's entity path.
     * @throws IllegalArgumentException Queue or topic name are not set.
     */
    public ServiceBusReceiverAsyncClient buildAsyncClient() {
        // Async clients may auto-complete; the flag is only restricted for the synchronous client.
        return buildAsyncClient(true);
    }

    /**
     * Creates a <b>synchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
     * @throws IllegalStateException if neither or both of the queue name and topic name are set, or if the
     * configured entity conflicts with the connection string's entity path.
     * @throws IllegalArgumentException Queue or topic name are not set.
     */
    public ServiceBusReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusReceiverClient(buildAsyncClient(false),
            isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    // Shared construction path for the async and sync receiver clients.
    // isAutoCompleteAllowed is false for the synchronous client, which cannot auto-complete.
    ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);
        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }
        // No message locks exist in RECEIVE_AND_DELETE mode, so lock renewal is forced off.
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete);
        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
            entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
 * Validates that the given prefetch count is non-negative.
 *
 * @param prefetchCount the prefetch count to validate.
 * @throws IllegalArgumentException if {@code prefetchCount} is negative.
 */
private void validateAndThrow(int prefetchCount) {
    if (prefetchCount >= 0) {
        return;
    }
    throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
        "prefetchCount (%s) cannot be less than 0.", prefetchCount)));
}
/**
 * Validates that the given lock-renewal duration is not negative. A {@code null} duration is allowed and
 * means auto-renewal is disabled.
 *
 * @param maxLockRenewalDuration the duration to validate; may be {@code null}.
 * @throws IllegalArgumentException if {@code maxLockRenewalDuration} is negative.
 */
private void validateAndThrow(Duration maxLockRenewalDuration) {
    final boolean isNegative = maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative();
    if (isNegative) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    }
}
} |
The Service Bus processor client comes from `ServiceBusClientBuilder` and differs from the Event Hubs processor client, so we don't need to add a new method to both `EventHubClientBuilder` and `EventProcessorClientBuilder` the way Event Hubs does. All Service Bus clients use the same method, `ServiceBusClientBuilder.getOrCreateConnectionProcessor`, which uses the custom endpoint. Overall, the current custom-endpoint code works at the processor level. //cc @conniey | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
}
return this;
} | if (customEndpointAddress == null) { | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
}
return this;
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
// --- Constants shared by all client builders produced from this builder. ---
private static final AmqpRetryOptions DEFAULT_RETRY =
new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
private static final int DEFAULT_PREFETCH_COUNT = 0;
// Keys looked up in the properties file for user-agent telemetry.
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
// Matches "host:port" proxy addresses; used when parsing the HTTP_PROXY configuration value.
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
// Guards creation/disposal of the shared connection (see getOrCreateConnectionProcessor/onClientClose).
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
// --- Mutable builder state populated by the fluent setters below. ---
private ClientOptions clientOptions;
private Configuration configuration;
private ServiceBusConnectionProcessor sharedConnection;
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
* Keeps track of the open clients that were created from this builder when there is a shared connection.
*/
private final AtomicInteger openClients = new AtomicInteger();
/**
* Creates a new instance with the default transport {@link AmqpTransportType#AMQP}.
*/
public ServiceBusClientBuilder() {
}
/**
* Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
* certain properties, as well as support the addition of custom header information. Refer to the {@link
* ClientOptions} documentation for more information.
*
* @param clientOptions to be set on the client.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
 * Sets the fully-qualified namespace for the Service Bus.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    // Validate before assigning so a failing call does not leave the builder partially configured
    // (the previous implementation stored the value first and then threw on an empty string).
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    return this;
}
// Returns the configured namespace, failing fast when it was never set (or was set to an empty value).
private String getAndValidateFullyQualifiedNamespace() {
    final String namespace = fullyQualifiedNamespace;
    if (!CoreUtils.isNullOrEmpty(namespace)) {
        return namespace;
    }
    throw LOGGER.logExceptionAsError(
        new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
/**
* Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
* does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
* an intermediary. For example: {@literal https://10.81.12.13}.
* <p>
* If no port is specified, the default port for the {@link #transportType(AmqpTransportType) transport type} is
* used.
*
* @param customEndpointAddress The custom endpoint address.
* @return The updated {@link ServiceBusClientBuilder} object.
* @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
*/
/**
* Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
*
* @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder connectionString(String connectionString) {
final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
final TokenCredential tokenCredential;
try {
tokenCredential = getTokenCredential(properties);
} catch (Exception e) {
throw LOGGER.logExceptionAsError(
new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
}
this.fullyQualifiedNamespace = properties.getEndpoint().getHost();
// An entity-scoped connection string pins this builder to one queue/topic; remembered so later
// queueName/topicName settings can be validated against it (see validateEntityPaths).
String entityPath = properties.getEntityPath();
if (!CoreUtils.isNullOrEmpty(entityPath)) {
LOGGER.atInfo()
.addKeyValue(ENTITY_PATH_KEY, entityPath)
.log("Setting entity from connection string.");
this.connectionStringEntityName = entityPath;
}
return credential(properties.getEndpoint().getHost(), tokenCredential);
}
/**
* Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
* scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
* one 'send-via' entity on server side as explained next.
* Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
* entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
* perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
* their first operation need to either be senders, or if they are receivers they need to be on the same entity as
* the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
* that the transaction is committed because it cannot route a receive operation through a different entity). For
* instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
* cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
* first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
*
* <p><strong>Avoid using non-transaction API on this client</strong></p>
* Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
* have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
* entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
*
* <p><strong>When not to enable this feature</strong></p>
* If your transaction is involved in one Service bus entity only. For example you are receiving from one
* queue/subscription and you want to settle your own messages which are part of one transaction.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*
* @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-transactions">Service Bus transactions</a>
*/
public ServiceBusClientBuilder enableCrossEntityTransactions() {
this.crossEntityTransactions = true;
return this;
}
// Builds a shared-key credential from the parsed connection string: a pre-computed SAS token when
// present, otherwise the key-name/key pair with the standard token validity window.
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
    return properties.getSharedAccessSignature() == null
        ? new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
            properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY)
        : new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* If not specified, the default configuration store is used to configure Service Bus clients. Use {@link
* Configuration#NONE} to bypass using configuration settings during construction.
*
* @param configuration The configuration store used to configure Service Bus clients.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
 * Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
 * <a href="https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/identity/azure-identity">
 * azure-identity</a> has multiple {@link TokenCredential} implementations that can be used to authenticate
 * the access to the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
    // Validate every argument before touching builder state so a failed call leaves the builder unchanged
    // (the previous implementation assigned the fields first and then threw on an empty namespace).
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = credential;
    return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https://aka.ms/azsdk/java/identity">identity and authentication</a>
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential The token credential to use for authentication. Access controls may be specified by the
* ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link #connectionString(String) connectionString()} take the same information in different
 * forms. But it allows you to update the name and key.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
    // Validate first so a failed call does not leave the builder with a half-applied namespace
    // (the previous implementation assigned the namespace before the empty-string check).
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    // Wrap the named key in the AMQP shared-key credential used by the connection layer.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
* Sets the credential with the shared access policies for the Service Bus resource.
* You can find the shared access policies on the azure portal or Azure CLI.
* For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
* The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
* can be either 'Primary Key' or 'Secondary Key'.
* This method and {@link #connectionString(String) connectionString()} take the same information in different
* forms. But it allows you to update the name and key.
*
* @param credential {@link AzureNamedKeyCredential} to be used for authentication.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
// Wrap the named key in the AMQP shared-key credential used by the connection layer.
this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-sas">
 * Service Bus access control with Shared Access Signatures</a>.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
    // Validate first so a failed call does not leave the builder with a half-applied namespace
    // (the previous implementation assigned the namespace before the empty-string check).
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
* Sets the credential with Shared Access Signature for the Service Bus resource.
* Refer to <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-sas">
* Service Bus access control with Shared Access Signatures</a>.
*
* @param credential {@link AzureSasCredential} to be used for authentication.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
return this;
}
/**
* Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured, {@link
* AmqpTransportType#AMQP_WEB_SOCKETS} must be used for the transport type.
*
* @param proxyOptions The proxy configuration to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* Package-private method that sets the verify mode for this connection.
*
* @param verifyMode The verification mode.
* @return The updated {@link ServiceBusClientBuilder} object.
*/
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
this.verifyMode = verifyMode;
return this;
}
/**
* Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
*
* @param retryOptions The retry options to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Sets the scheduler to use.
*
* @param scheduler Scheduler to be used.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
this.scheduler = scheduler;
return this;
}
/**
* Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is {@link
* AmqpTransportType#AMQP}.
*
* @param transportType The transport type to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
this.transport = transportType;
return this;
}
/**
* A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
*
* @return A new instance of {@link ServiceBusSenderClientBuilder}.
*/
public ServiceBusSenderClientBuilder sender() {
return new ServiceBusSenderClientBuilder();
}
/**
* A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
*
* @return A new instance of {@link ServiceBusReceiverClientBuilder}.
*/
public ServiceBusReceiverClientBuilder receiver() {
return new ServiceBusReceiverClientBuilder();
}
/**
* A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service
* Bus message receivers.
*
* @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
*/
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
return new ServiceBusSessionReceiverClientBuilder();
}
/**
* A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient}
* instance.
*
* @return A new instance of {@link ServiceBusProcessorClientBuilder}.
*/
public ServiceBusProcessorClientBuilder processor() {
return new ServiceBusProcessorClientBuilder();
}
/**
* A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
* instance that processes sessions.
* @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
*/
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
return new ServiceBusSessionProcessorClientBuilder();
}
/**
* Called when a child client is closed. Disposes of the shared connection if there are no more clients.
*/
void onClientClose() {
// Synchronized with getOrCreateConnectionProcessor so the shared connection is not disposed
// while another thread is handing it out to a new client.
synchronized (connectionLock) {
final int numberOfOpenClients = openClients.decrementAndGet();
LOGGER.atInfo()
.addKeyValue("numberOfOpenClients", numberOfOpenClients)
.log("Closing a dependent client.");
if (numberOfOpenClients > 0) {
return;
}
// A negative count means close was invoked more times than clients were created; log and
// still fall through so the connection is released rather than leaked.
if (numberOfOpenClients < 0) {
LOGGER.atWarning()
.addKeyValue("numberOfOpenClients", numberOfOpenClients)
.log("There should not be less than 0 clients.");
}
LOGGER.info("No more open clients, closing shared connection.");
if (sharedConnection != null) {
sharedConnection.dispose();
// Reset so a later client creation builds a fresh connection.
sharedConnection = null;
} else {
LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
}
}
}
// Lazily creates (or reuses) the connection processor shared by every client built from this builder,
// and bumps the open-client reference count consumed by onClientClose().
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
if (retryOptions == null) {
retryOptions = DEFAULT_RETRY;
}
if (scheduler == null) {
// NOTE(review): Schedulers.elastic() is deprecated in recent Reactor versions — consider boundedElastic().
scheduler = Schedulers.elastic();
}
synchronized (connectionLock) {
if (sharedConnection == null) {
final ConnectionOptions connectionOptions = getConnectionOptions();
// repeat() re-emits a fresh connection each time the processor requests one after a disconnect.
final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
final String connectionId = StringUtil.getRandomString("MF");
final ReactorProvider provider = new ReactorProvider();
final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
connectionOptions.getAuthorizationScope());
return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
crossEntityTransactions);
}).repeat();
sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
}
}
final int numberOfOpenClients = openClients.incrementAndGet();
// NOTE(review): the log statement below appears truncated in this source view — verify against the original file.
LOGGER.info("
return sharedConnection;
}
// Materializes the AMQP ConnectionOptions from the builder state, applying defaults for anything unset.
private ConnectionOptions getConnectionOptions() {
configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
if (credentials == null) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
+ "They can be set using: connectionString(String), connectionString(String, String), "
+ "or credentials(String, String, TokenCredential)"
));
}
// An explicit proxy only works over web sockets; fail fast on an unsupported combination.
if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
&& transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"Cannot use a proxy when TransportType is not AMQP."));
}
if (proxyOptions == null) {
proxyOptions = getDefaultProxyConfiguration(configuration);
}
// Shared-key credentials authorize via SAS; everything else is treated as an AAD JSON web token.
final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
: CbsAuthorizationType.JSON_WEB_TOKEN;
final SslDomain.VerifyMode verificationMode = verifyMode != null
? verifyMode
: SslDomain.VerifyMode.VERIFY_PEER_NAME;
final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
// Product name/version for user-agent telemetry, read from the SDK's properties file.
final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
if (customEndpointAddress == null) {
return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
options, verificationMode, product, clientVersion);
} else {
// The overload with host/port routes traffic through the configured custom endpoint.
return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
customEndpointAddress.getPort());
}
}
// Derives proxy settings from the environment when none were configured explicitly: reuses the
// authentication type of any partially-set proxyOptions, and falls back to system defaults when
// no HTTP_PROXY address is present.
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
    final ProxyAuthenticationType authentication = proxyOptions == null
        ? ProxyAuthenticationType.NONE
        : proxyOptions.getAuthentication();
    final String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
    if (CoreUtils.isNullOrEmpty(proxyAddress)) {
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
    final boolean useSystemProxies = Boolean.parseBoolean(configuration.get("java.net.useSystemProxies"));
    return getProxyOptions(authentication, proxyAddress, configuration, useSystemProxies);
}
// Resolves ProxyOptions from a raw proxy address: "host:port" is parsed directly; otherwise the
// address is honored only when the JVM's system-proxy flag is set.
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
Configuration configuration, boolean useSystemProxies) {
String host;
int port;
if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
final String[] hostPort = proxyAddress.split(":");
host = hostPort[0];
port = Integer.parseInt(hostPort[1]);
final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
// Optional proxy credentials sourced from configuration; may be null.
final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
return new ProxyOptions(authentication, proxy, username, password);
} else if (useSystemProxies) {
// Delegate non-"host:port" formats to azure-core's proxy resolution.
com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
.fromConfiguration(configuration);
return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
} else {
LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
+ "set or was false.");
return ProxyOptions.SYSTEM_DEFAULTS;
}
}
// Treats both a null reference and a zero-length string as "not provided".
private static boolean isNullOrEmpty(String item) {
    return item == null || item.length() == 0;
}
/**
 * Determines the messaging entity type implied by the configured names and rejects inconsistent
 * combinations: nothing set at all, both a queue and a topic, or a queue/topic that disagrees with
 * the entity path baked into the connection string.
 */
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
    String topicName, String queueName) {
    final boolean topicSet = !isNullOrEmpty(topicName);
    final boolean queueSet = !isNullOrEmpty(queueName);
    final boolean connectionStringEntitySet = !isNullOrEmpty(connectionStringEntityName);
    if (!connectionStringEntitySet && !queueSet && !topicSet) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
            "Cannot build client without setting either a queueName or topicName."));
    }
    if (queueSet && topicSet) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
    }
    if (queueSet) {
        // Queue name must agree with any entity path from the connection string.
        if (connectionStringEntitySet && !queueName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "queueName (%s) is different than the connectionString's EntityPath (%s).",
                queueName, connectionStringEntityName)));
        }
        return MessagingEntityType.QUEUE;
    }
    if (topicSet) {
        // Topic name must agree with any entity path from the connection string.
        if (connectionStringEntitySet && !topicName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) is different than the connectionString's EntityPath (%s).",
                topicName, connectionStringEntityName)));
        }
        return MessagingEntityType.SUBSCRIPTION;
    }
    // Only the connection string names an entity; the concrete type is not known here.
    return MessagingEntityType.UNKNOWN;
}
/**
 * Builds the AMQP entity path for the given entity type, optionally suffixed for the requested
 * sub-queue (dead-letter or transfer dead-letter).
 */
private static String getEntityPath(MessagingEntityType entityType, String queueName,
    String topicName, String subscriptionName, SubQueue subQueue) {
    final String basePath;
    switch (entityType) {
        case QUEUE:
            basePath = queueName;
            break;
        case SUBSCRIPTION:
            // A subscription path requires both the topic and the subscription name.
            if (isNullOrEmpty(subscriptionName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "topicName (%s) must have a subscriptionName associated with it.", topicName)));
            }
            basePath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
                subscriptionName);
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                new IllegalArgumentException("Unknown entity type: " + entityType));
    }
    if (subQueue == null || subQueue == SubQueue.NONE) {
        return basePath;
    }
    if (subQueue == SubQueue.DEAD_LETTER_QUEUE) {
        return basePath + DEAD_LETTER_QUEUE_NAME_SUFFIX;
    }
    if (subQueue == SubQueue.TRANSFER_DEAD_LETTER_QUEUE) {
        return basePath + TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
    }
    throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: "
        + subQueue));
}
/**
* Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
* to Service Bus.
*
* @see ServiceBusSenderAsyncClient
* @see ServiceBusSenderClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
// Destination entity: exactly one of these (or the connection string's entity path) must be set.
private String queueName;
private String topicName;
private ServiceBusSenderClientBuilder() {
}
/**
* Sets the name of the Service Bus queue to publish messages to.
*
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSenderClientBuilder} object.
*/
public ServiceBusSenderClientBuilder queueName(String queueName) {
this.queueName = queueName;
return this;
}
/**
* Sets the name of the Service Bus topic to publish messages to.
*
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSenderClientBuilder} object.
*/
public ServiceBusSenderClientBuilder topicName(String topicName) {
this.topicName = topicName;
return this;
}
/**
* Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
* ServiceBusMessage} to a Service Bus queue or topic.
*
* @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
* @throws IllegalStateException if {@code queueName} or {@code topicName} are not set or, both of these fields
* are set. It is also thrown if the Service Bus connection string contains an {@code EntityPath} that does not
* match the configured {@code queueName} or {@code topicName}.
* @throws IllegalArgumentException if the entity type is not a queue or a topic.
*/
public ServiceBusSenderAsyncClient buildAsyncClient() {
// Shares (or creates) the builder-wide connection and registers this client against it.
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
queueName);
// Pick the entity name that matches the validated type; UNKNOWN falls back to the
// entity path supplied via the connection string.
final String entityName;
switch (entityType) {
case QUEUE:
entityName = queueName;
break;
case SUBSCRIPTION:
entityName = topicName;
break;
case UNKNOWN:
entityName = connectionStringEntityName;
break;
default:
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("Unknown entity type: " + entityType));
}
return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
}
/**
* Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
* to a Service Bus queue or topic.
*
* @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
* @throws IllegalStateException if {@code queueName} or {@code topicName} are not set or, both of these fields
* are set. It is also thrown if the Service Bus connection string contains an {@code EntityPath} that does not
* match the configured {@code queueName} or {@code topicName}.
* @throws IllegalArgumentException if the entity type is not a queue or a topic.
*/
public ServiceBusSenderClient buildClient() {
return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
}
}
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
* entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link
* and {@link
* next session to process.
*
* <p>
* By default, the processor:
* <ul>
* <li>Automatically settles messages. Disabled via {@link
* <li>Processes 1 session concurrently. Configured via {@link
* <li>Invokes 1 instance of {@link
* {@link
* </ul>
*
* <p><strong>Instantiate a session-enabled processor client</strong></p>
* <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient
* <pre>
* Consumer<ServiceBusReceivedMessageContext> onMessage = context -> &
* ServiceBusReceivedMessage message = context.getMessage&
* System.out.printf&
* message.getSessionId&
* &
*
* Consumer<ServiceBusErrorContext> onError = context -> &
* System.out.printf&
* context.getFullyQualifiedNamespace&
*
* if &
* ServiceBusException exception = &
* System.out.printf&
* exception.getReason&
* &
* System.out.printf&
* &
* &
*
* &
*
* ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder&
* .connectionString&
* .sessionProcessor&
* .queueName&
* .maxConcurrentSessions&
* .processMessage&
* .processError&
* .buildProcessorClient&
*
* &
* sessionProcessor.start&
* </pre>
* <!-- end com.azure.messaging.servicebus.servicebusprocessorclient
*
* @see ServiceBusProcessorClient
*/
public final class ServiceBusSessionProcessorClientBuilder {
// Options applied to the processor client and the underlying session receiver it wraps.
private final ServiceBusProcessorClientOptions processorClientOptions;
private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
// User-supplied callbacks for messages and errors.
private Consumer<ServiceBusReceivedMessageContext> processMessage;
private Consumer<ServiceBusErrorContext> processError;
private ServiceBusSessionProcessorClientBuilder() {
sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
// Defaults: one concurrent callback invocation and one concurrent session.
processorClientOptions = new ServiceBusProcessorClientOptions()
.setMaxConcurrentCalls(1)
.setTracerProvider(tracerProvider);
sessionReceiverClientBuilder.maxConcurrentSessions(1);
}
/**
* Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
* disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
* auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration#ZERO}
* or {@code null} indicates that auto-renewal is disabled.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
}
sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
return this;
}
/**
* Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application starts the processor.
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
* Using a non-zero prefetch risks of losing messages even though it has better performance.
* @see <a href="https:
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
sessionReceiverClientBuilder.prefetchCount(prefetchCount);
return this;
}
/**
* Sets the name of the queue to create a processor for.
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
sessionReceiverClientBuilder.queueName(queueName);
return this;
}
/**
* Sets the receive mode for the processor.
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
sessionReceiverClientBuilder.receiveMode(receiveMode);
return this;
}
/**
* Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
* secondary sub-queue, called a dead-letter queue (DLQ).
*
* @param subQueue The type of the sub queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
* @see SubQueue
*/
public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
this.sessionReceiverClientBuilder.subQueue(subQueue);
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
sessionReceiverClientBuilder.subscriptionName(subscriptionName);
return this;
}
/**
* Sets the name of the topic. <b>{@link
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
sessionReceiverClientBuilder.topicName(topicName);
return this;
}
/**
* The message processing callback for the processor that will be executed when a message is received.
* @param processMessage The message processing consumer that will be executed when a message is received.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder processMessage(
Consumer<ServiceBusReceivedMessageContext> processMessage) {
this.processMessage = processMessage;
return this;
}
/**
* The error handler for the processor which will be invoked in the event of an error while receiving messages.
* @param processError The error handler which will be executed when an error occurs.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object
*/
public ServiceBusSessionProcessorClientBuilder processError(
Consumer<ServiceBusErrorContext> processError) {
this.processError = processError;
return this;
}
/**
* Max concurrent messages that this processor should process.
*
* @param maxConcurrentCalls max concurrent messages that this processor should process.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
if (maxConcurrentCalls < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
return this;
}
/**
* Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
* {@link ServiceBusReceivedMessageContext
* the message is processed, it is {@link ServiceBusReceivedMessageContext
* abandoned}.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
sessionReceiverClientBuilder.disableAutoComplete();
processorClientOptions.setDisableAutoComplete(true);
return this;
}
/**
* Creates a <b>session-aware</b> Service Bus processor responsible for reading
* {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
* @throws NullPointerException if the {@link
* callbacks are not set.
*/
public ServiceBusProcessorClient buildProcessorClient() {
return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
sessionReceiverClientBuilder.subscriptionName,
Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
}
}
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from a <b>session aware</b> Service Bus entity.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusSessionReceiverClientBuilder {
    // Auto-complete is on by default; disabled explicitly or implicitly for RECEIVE_AND_DELETE.
    private boolean enableAutoComplete = true;
    // null means "no roll-over limit set"; the processor builder sets this to at least 1.
    private Integer maxConcurrentSessions = null;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
    private SubQueue subQueue = SubQueue.NONE;

    private ServiceBusSessionReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully
     * processed message is completed; if an error happens while the message is processed, it is
     * abandoned.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the session lock. Setting
     * {@link Duration#ZERO} or {@code null} disables auto-renewal. For
     * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session
     * lock. {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     *
     * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}
     * sessions concurrently.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            // Quote the parameter name for consistency with the other builders' messages.
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "'maxConcurrentSessions' cannot be less than 1."));
        }
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver.
     *
     * Prefetch speeds up the message flow by aiming to have a message readily available for local
     * retrieval when and before the application asks for one. Setting a non-zero value will
     * prefetch that number of messages. Setting the value to zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and
     * subscriptions provide a secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>{@code topicName} must also be set.</b>
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic. <b>{@code subscriptionName} must also be set.</b>
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for
     * reading {@link ServiceBusMessage messages} from a specific queue or subscription, for use by
     * the processor client.
     *
     * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if the queue name or topic name are not set, or both are set.
     * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
     * {@code topicName()}.
     */
    ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);

        // RECEIVE_AND_DELETE removes the message on receipt, so completion is meaningless.
        if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }

        // There is no lock to renew in RECEIVE_AND_DELETE mode.
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null,
            maxConcurrentSessions);
        final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
            connectionProcessor, tracerProvider, messageSerializer, receiverOptions);

        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
            entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
    }

    /**
     * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for
     * reading {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a
     * queue or subscription.
     * @throws IllegalStateException if the queue name or topic name are not set, or both are set.
     * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
     * {@code topicName()}.
     */
    public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for
     * reading {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return An new {@link ServiceBusSessionReceiverClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if the queue name or topic name are not set, or both are set.
     * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
     * {@code topicName()}.
     */
    public ServiceBusSessionReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
            isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    /**
     * Shared construction path for the async and sync clients.
     *
     * @param isAutoCompleteAllowed {@code false} when building for the synchronous client, which
     * does not support auto-complete outside of callback receive.
     */
    private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            SubQueue.NONE);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            LOGGER.warning("'enableAutoComplete' is not needed for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }

        // There is no lock to renew in RECEIVE_AND_DELETE mode.
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);

        return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
* {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies
* the message processing callback when a message is received or the error handle when an error is observed. To
* create an instance, therefore, configuring the two callbacks - {@link
* {@link
* with auto-completion and auto-lock renewal capabilities.
*
 * <p><strong>Sample code to instantiate a processor client</strong></p>
 * <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient#instantiation -->
 * <pre>
 * Consumer&lt;ServiceBusReceivedMessageContext&gt; onMessage = context -&gt; {
 *     ServiceBusReceivedMessage message = context.getMessage();
 *     System.out.printf("Processing message. Sequence #: %s. Contents: %s%n",
 *         message.getSequenceNumber(), message.getBody());
 * };
 *
 * Consumer&lt;ServiceBusErrorContext&gt; onError = context -&gt; {
 *     System.out.printf("Error when receiving messages from namespace: '%s'. Entity: '%s'%n",
 *         context.getFullyQualifiedNamespace(), context.getEntityPath());
 *
 *     if (context.getException() instanceof ServiceBusException) {
 *         ServiceBusException exception = (ServiceBusException) context.getException();
 *         System.out.printf("Error source: %s, reason: %s%n", context.getErrorSource(),
 *             exception.getReason());
 *     } else {
 *         System.out.printf("Error occurred: %s%n", context.getException());
 *     }
 * };
 *
 * // Create the processor client via the builder and its sub-builder.
 *
 * ServiceBusProcessorClient processor = new ServiceBusClientBuilder()
 *     .connectionString(connectionString)
 *     .processor()
 *     .queueName(queueName)
 *     .processMessage(onMessage)
 *     .processError(onError)
 *     .buildProcessorClient();
 *
 * // Starts the processor in the background and returns immediately.
 * processor.start();
 * </pre>
 * <!-- end com.azure.messaging.servicebus.servicebusprocessorclient#instantiation -->
*
* @see ServiceBusProcessorClient
*/
public final class ServiceBusProcessorClientBuilder {
    // Underlying (non-session) receiver builder this processor builder delegates entity and
    // receive settings to.
    private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
    // Processor-level options (max concurrent calls, auto-complete, tracing) applied at build time.
    private final ServiceBusProcessorClientOptions processorClientOptions;
    // Callback invoked for each received message.
    private Consumer<ServiceBusReceivedMessageContext> processMessage;
    // Callback invoked when an error occurs while receiving messages.
    private Consumer<ServiceBusErrorContext> processError;

    /**
     * Creates the builder with a default of one concurrent call.
     */
    private ServiceBusProcessorClientBuilder() {
        serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
        processorClientOptions = new ServiceBusProcessorClientOptions()
            .setMaxConcurrentCalls(1)
            .setTracerProvider(tracerProvider);
    }

    /**
     * Sets the prefetch count of the processor.
     *
     * Prefetch speeds up the message flow by aiming to have a message readily available for local
     * retrieval when and before the application starts the processor. Setting a non-zero value
     * will prefetch that number of messages. Setting the value to zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
        serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
        return this;
    }

    /**
     * Sets the name of the queue to create a processor for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder queueName(String queueName) {
        serviceBusReceiverClientBuilder.queueName(queueName);
        return this;
    }

    /**
     * Sets the receive mode for the processor.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        serviceBusReceiverClientBuilder.receiveMode(receiveMode);
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and
     * subscriptions provide a secondary sub-queue, called a dead-letter queue (DLQ).
     *
     * @param subQueue The type of the sub queue.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
        serviceBusReceiverClientBuilder.subQueue(subQueue);
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>{@code topicName} must also be set.</b>
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
        serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
        return this;
    }

    /**
     * Sets the name of the topic. <b>{@code subscriptionName} must also be set.</b>
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder topicName(String topicName) {
        serviceBusReceiverClientBuilder.topicName(topicName);
        return this;
    }

    /**
     * The message processing callback for the processor which will be executed when a message is
     * received.
     *
     * @param processMessage The message processing consumer that will be executed when a message is
     * received.
     *
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder processMessage(
        Consumer<ServiceBusReceivedMessageContext> processMessage) {
        this.processMessage = processMessage;
        return this;
    }

    /**
     * The error handler for the processor which will be invoked in the event of an error while
     * receiving messages.
     *
     * @param processError The error handler which will be executed when an error occurs.
     *
     * @return The updated {@link ServiceBusProcessorClientBuilder} object
     */
    public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
        this.processError = processError;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE}
     * mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     *
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
        return this;
    }

    /**
     * Max concurrent messages that this processor should process. By default, this is set to 1.
     *
     * @param maxConcurrentCalls max concurrent messages that this processor should process.
     * @return The updated {@link ServiceBusProcessorClientBuilder} object.
     * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less
     * than 1.
     */
    public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
        if (maxConcurrentCalls < 1) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
        }
        processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
        return this;
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully
     * processed message is completed; if an error happens while the message is processed, it is
     * abandoned.
     *
     * @return The modified {@link ServiceBusProcessorClientBuilder} object.
     */
    public ServiceBusProcessorClientBuilder disableAutoComplete() {
        serviceBusReceiverClientBuilder.disableAutoComplete();
        processorClientOptions.setDisableAutoComplete(true);
        return this;
    }

    /**
     * Creates Service Bus message processor responsible for reading
     * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
     *
     * @return An new {@link ServiceBusProcessorClient} that processes messages from a queue or
     * subscription.
     * @throws IllegalStateException if the queue name or topic name are not set, or both are set.
     * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
     * {@code topicName()}.
     * @throws NullPointerException if the {@code processMessage} or {@code processError}
     * callbacks are not set.
     */
    public ServiceBusProcessorClient buildProcessorClient() {
        // The entity names are forwarded so the processor can rebuild receiver clients on recovery.
        return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
            serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
            serviceBusReceiverClientBuilder.subscriptionName,
            Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
            Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
    }
}
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from Service Bus.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
public final class ServiceBusReceiverClientBuilder {
    // Auto-complete is on by default; disabled explicitly or implicitly for RECEIVE_AND_DELETE.
    private boolean enableAutoComplete = true;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private SubQueue subQueue;
    private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

    private ServiceBusReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages. By default, a successfully
     * processed message is completed; if an error happens while the message is processed, it is
     * abandoned.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
     * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE}
     * mode, auto-renewal is disabled.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
     * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
     *
     * @return The updated {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
     */
    public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver.
     *
     * Prefetch speeds up the message flow by aiming to have a message readily available for local
     * retrieval when and before the application asks for one. Setting a non-zero value will
     * prefetch that number of messages. Setting the value to zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is negative.
     */
    public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the type of the {@link SubQueue} to connect to.
     *
     * @param subQueue The type of the sub queue.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     * @see SubQueue
     */
    public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
        this.subQueue = subQueue;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to.
     * <b>{@code topicName} must also be set.</b>
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic. <b>{@code subscriptionName} must also be set.</b>
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusReceiverClientBuilder} object.
     */
    public ServiceBusReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if the queue name or topic name are not set, or both are set.
     * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
     * {@code topicName()}.
     */
    public ServiceBusReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates <b>synchronous</b> Service Bus receiver responsible for reading
     * {@link ServiceBusMessage messages} from a specific queue or subscription.
     *
     * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or
     * subscription.
     * @throws IllegalStateException if the queue name or topic name are not set, or both are set.
     * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
     * {@code topicName()}.
     */
    public ServiceBusReceiverClient buildClient() {
        final boolean isPrefetchDisabled = prefetchCount == 0;
        return new ServiceBusReceiverClient(buildAsyncClient(false),
            isPrefetchDisabled,
            MessageUtils.getTotalTimeout(retryOptions));
    }

    /**
     * Shared construction path for the async and sync clients.
     *
     * @param isAutoCompleteAllowed {@code false} when building for the synchronous client, which
     * does not support auto-complete outside of callback receive.
     */
    ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
            subQueue);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            LOGGER.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            // RECEIVE_AND_DELETE removes the message on receipt, so completion is meaningless.
            LOGGER.warning("'enableAutoComplete' is not needed for RECEIVE_AND_DELETE mode.");
            enableAutoComplete = false;
        }

        // There is no lock to renew in RECEIVE_AND_DELETE mode.
        if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete);

        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
            entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
 * Validates that a prefetch count is non-negative.
 *
 * @param prefetchCount The prefetch count to validate.
 * @throws IllegalArgumentException if {@code prefetchCount} is negative.
 */
private void validateAndThrow(int prefetchCount) {
    if (prefetchCount >= 0) {
        return;
    }
    throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
        "prefetchCount (%s) cannot be less than 0.", prefetchCount)));
}
/**
 * Validates that a lock renewal duration is not negative. {@code null} is accepted and means
 * auto-renewal is disabled.
 *
 * @param maxLockRenewalDuration The duration to validate; may be {@code null}.
 * @throws IllegalArgumentException if {@code maxLockRenewalDuration} is negative.
 */
private void validateAndThrow(Duration maxLockRenewalDuration) {
    final boolean isNegative = maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative();
    if (isNegative) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    }
}
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
private static final AmqpRetryOptions DEFAULT_RETRY =
new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
private static final int DEFAULT_PREFETCH_COUNT = 0;
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
private ClientOptions clientOptions;
private Configuration configuration;
private ServiceBusConnectionProcessor sharedConnection;
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
* Keeps track of the open clients that were created from this builder when there is a shared connection.
*/
private final AtomicInteger openClients = new AtomicInteger();
/**
* Creates a new instance with the default transport {@link AmqpTransportType
*/
public ServiceBusClientBuilder() {
}
/**
* Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
* certain properties, as well as support the addition of custom header information. Refer to the {@link
* ClientOptions} documentation for more information.
*
* @param clientOptions to be set on the client.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the fully-qualified namespace for the Service Bus.
*
* @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return this;
}
private String getAndValidateFullyQualifiedNamespace() {
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return fullyQualifiedNamespace;
}
/**
* Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
* does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
* an intermediary. For example: {@literal https:
* <p>
* If no port is specified, the default port for the {@link
* used.
*
* @param customEndpointAddress The custom endpoint address.
* @return The updated {@link ServiceBusClientBuilder} object.
* @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
*/
/**
* Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
*
* @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder connectionString(String connectionString) {
final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
final TokenCredential tokenCredential;
try {
tokenCredential = getTokenCredential(properties);
} catch (Exception e) {
throw LOGGER.logExceptionAsError(
new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
}
this.fullyQualifiedNamespace = properties.getEndpoint().getHost();
String entityPath = properties.getEntityPath();
if (!CoreUtils.isNullOrEmpty(entityPath)) {
LOGGER.atInfo()
.addKeyValue(ENTITY_PATH_KEY, entityPath)
.log("Setting entity from connection string.");
this.connectionStringEntityName = entityPath;
}
return credential(properties.getEndpoint().getHost(), tokenCredential);
}
/**
* Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
* scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
* one 'send-via' entity on server side as explained next.
* Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
* entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
* perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
* their first operation need to either be senders, or if they are receivers they need to be on the same entity as
* the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
* that the transaction is committed because it cannot route a receive operation through a different entity). For
* instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
* cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
* first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
*
* <p><strong>Avoid using non-transaction API on this client</strong></p>
* Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
* have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
* entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
*
* <p><strong>When not to enable this feature</strong></p>
* If your transaction is involved in one Service bus entity only. For example you are receiving from one
* queue/subscription and you want to settle your own messages which are part of one transaction.
*
* @return The updated {@link ServiceBusSenderClientBuilder} object.
*
* @see <a href="https:
*/
public ServiceBusClientBuilder enableCrossEntityTransactions() {
this.crossEntityTransactions = true;
return this;
}
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
TokenCredential tokenCredential;
if (properties.getSharedAccessSignature() == null) {
tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY);
} else {
tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
}
return tokenCredential;
}
/**
* Sets the configuration store that is used during construction of the service client.
*
* If not specified, the default configuration store is used to configure Service Bus clients. Use {@link
* Configuration
*
* @param configuration The configuration store used to configure Service Bus clients.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
* <a href="https:
* azure-identity</a> has multiple {@link TokenCredential} implementations that can be used to authenticate
* the access to the Service Bus resource.
*
* @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
* @param credential The token credential to use for authentication. Access controls may be specified by the
* ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential The token credential to use for authentication. Access controls may be specified by the
* ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
return this;
}
/**
* Sets the credential with the shared access policies for the Service Bus resource.
* You can find the shared access policies on the azure portal or Azure CLI.
* For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
* The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
* can be either 'Primary Key' or 'Secondary Key'.
* This method and {@link
* you to update the name and key.
*
* @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
* @param credential {@link AzureNamedKeyCredential} to be used for authentication.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
Objects.requireNonNull(credential, "'credential' cannot be null.");
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
return this;
}
/**
* Sets the credential with the shared access policies for the Service Bus resource.
* You can find the shared access policies on the azure portal or Azure CLI.
* For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
* The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
* can be either 'Primary Key' or 'Secondary Key'.
* This method and {@link
* you to update the name and key.
*
* @param credential {@link AzureNamedKeyCredential} to be used for authentication.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
return this;
}
/**
* Sets the credential with Shared Access Signature for the Service Bus resource.
* Refer to <a href="https:
* Service Bus access control with Shared Access Signatures</a>.
*
* @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
* @param credential {@link AzureSasCredential} to be used for authentication.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
Objects.requireNonNull(credential, "'credential' cannot be null.");
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
return this;
}
/**
* Sets the credential with Shared Access Signature for the Service Bus resource.
* Refer to <a href="https:
* Service Bus access control with Shared Access Signatures</a>.
*
* @param credential {@link AzureSasCredential} to be used for authentication.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
return this;
}
/**
* Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured, {@link
* AmqpTransportType
*
* @param proxyOptions The proxy configuration to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* Package-private method that sets the verify mode for this connection.
*
* @param verifyMode The verification mode.
* @return The updated {@link ServiceBusClientBuilder} object.
*/
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
this.verifyMode = verifyMode;
return this;
}
/**
* Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
*
* @param retryOptions The retry options to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
this.retryOptions = retryOptions;
return this;
}
/**
* Sets the scheduler to use.
*
* @param scheduler Scheduler to be used.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
this.scheduler = scheduler;
return this;
}
/**
* Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is {@link
* AmqpTransportType
*
* @param transportType The transport type to use.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
this.transport = transportType;
return this;
}
/**
* A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
*
* @return A new instance of {@link ServiceBusSenderClientBuilder}.
*/
public ServiceBusSenderClientBuilder sender() {
return new ServiceBusSenderClientBuilder();
}
/**
* A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
*
* @return A new instance of {@link ServiceBusReceiverClientBuilder}.
*/
public ServiceBusReceiverClientBuilder receiver() {
return new ServiceBusReceiverClientBuilder();
}
/**
* A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service
* Bus message receivers.
*
* @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
*/
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
return new ServiceBusSessionReceiverClientBuilder();
}
/**
* A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient}
* instance.
*
* @return A new instance of {@link ServiceBusProcessorClientBuilder}.
*/
public ServiceBusProcessorClientBuilder processor() {
return new ServiceBusProcessorClientBuilder();
}
/**
* A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
* instance that processes sessions.
* @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
*/
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
return new ServiceBusSessionProcessorClientBuilder();
}
/**
* Called when a child client is closed. Disposes of the shared connection if there are no more clients.
*/
void onClientClose() {
synchronized (connectionLock) {
final int numberOfOpenClients = openClients.decrementAndGet();
LOGGER.atInfo()
.addKeyValue("numberOfOpenClients", numberOfOpenClients)
.log("Closing a dependent client.");
if (numberOfOpenClients > 0) {
return;
}
if (numberOfOpenClients < 0) {
LOGGER.atWarning()
.addKeyValue("numberOfOpenClients", numberOfOpenClients)
.log("There should not be less than 0 clients.");
}
LOGGER.info("No more open clients, closing shared connection.");
if (sharedConnection != null) {
sharedConnection.dispose();
sharedConnection = null;
} else {
LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
}
}
}
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
if (retryOptions == null) {
retryOptions = DEFAULT_RETRY;
}
if (scheduler == null) {
scheduler = Schedulers.elastic();
}
synchronized (connectionLock) {
if (sharedConnection == null) {
final ConnectionOptions connectionOptions = getConnectionOptions();
final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
final String connectionId = StringUtil.getRandomString("MF");
final ReactorProvider provider = new ReactorProvider();
final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
connectionOptions.getAuthorizationScope());
return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
crossEntityTransactions);
}).repeat();
sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
}
}
final int numberOfOpenClients = openClients.incrementAndGet();
LOGGER.info("
return sharedConnection;
}
private ConnectionOptions getConnectionOptions() {
configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
if (credentials == null) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
+ "They can be set using: connectionString(String), connectionString(String, String), "
+ "or credentials(String, String, TokenCredential)"
));
}
if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
&& transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"Cannot use a proxy when TransportType is not AMQP."));
}
if (proxyOptions == null) {
proxyOptions = getDefaultProxyConfiguration(configuration);
}
final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
: CbsAuthorizationType.JSON_WEB_TOKEN;
final SslDomain.VerifyMode verificationMode = verifyMode != null
? verifyMode
: SslDomain.VerifyMode.VERIFY_PEER_NAME;
final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
if (customEndpointAddress == null) {
return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
options, verificationMode, product, clientVersion);
} else {
return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
customEndpointAddress.getPort());
}
}
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
if (proxyOptions != null) {
authentication = proxyOptions.getAuthentication();
}
String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
if (CoreUtils.isNullOrEmpty(proxyAddress)) {
return ProxyOptions.SYSTEM_DEFAULTS;
}
return getProxyOptions(authentication, proxyAddress, configuration,
Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
}
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
Configuration configuration, boolean useSystemProxies) {
String host;
int port;
if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
final String[] hostPort = proxyAddress.split(":");
host = hostPort[0];
port = Integer.parseInt(hostPort[1]);
final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
return new ProxyOptions(authentication, proxy, username, password);
} else if (useSystemProxies) {
com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
.fromConfiguration(configuration);
return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
} else {
LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
+ "set or was false.");
return ProxyOptions.SYSTEM_DEFAULTS;
}
}
private static boolean isNullOrEmpty(String item) {
return item == null || item.isEmpty();
}
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
String topicName, String queueName) {
final boolean hasTopicName = !isNullOrEmpty(topicName);
final boolean hasQueueName = !isNullOrEmpty(queueName);
final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);
final MessagingEntityType entityType;
if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
"Cannot build client without setting either a queueName or topicName."));
} else if (hasQueueName && hasTopicName) {
throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
"Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
} else if (hasQueueName) {
if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
"queueName (%s) is different than the connectionString's EntityPath (%s).",
queueName, connectionStringEntityName)));
}
entityType = MessagingEntityType.QUEUE;
} else if (hasTopicName) {
if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
"topicName (%s) is different than the connectionString's EntityPath (%s).",
topicName, connectionStringEntityName)));
}
entityType = MessagingEntityType.SUBSCRIPTION;
} else {
entityType = MessagingEntityType.UNKNOWN;
}
return entityType;
}
private static String getEntityPath(MessagingEntityType entityType, String queueName,
String topicName, String subscriptionName, SubQueue subQueue) {
String entityPath;
switch (entityType) {
case QUEUE:
entityPath = queueName;
break;
case SUBSCRIPTION:
if (isNullOrEmpty(subscriptionName)) {
throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
"topicName (%s) must have a subscriptionName associated with it.", topicName)));
}
entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
subscriptionName);
break;
default:
throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
new IllegalArgumentException("Unknown entity type: " + entityType));
}
if (subQueue == null) {
return entityPath;
}
switch (subQueue) {
case NONE:
break;
case TRANSFER_DEAD_LETTER_QUEUE:
entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
break;
case DEAD_LETTER_QUEUE:
entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX;
break;
default:
throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: "
+ subQueue));
}
return entityPath;
}
/**
* Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
* to Service Bus.
*
* @see ServiceBusSenderAsyncClient
* @see ServiceBusSenderClient
*/
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
private String queueName;
private String topicName;
private ServiceBusSenderClientBuilder() {
}
/**
* Sets the name of the Service Bus queue to publish messages to.
*
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSenderClientBuilder} object.
*/
public ServiceBusSenderClientBuilder queueName(String queueName) {
this.queueName = queueName;
return this;
}
/**
* Sets the name of the Service Bus topic to publish messages to.
*
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSenderClientBuilder} object.
*/
public ServiceBusSenderClientBuilder topicName(String topicName) {
this.topicName = topicName;
return this;
}
/**
* Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
* ServiceBusMessage} to a Service Bus queue or topic.
*
* @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
* @throws IllegalArgumentException if the entity type is not a queue or a topic.
*/
public ServiceBusSenderAsyncClient buildAsyncClient() {
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
queueName);
final String entityName;
switch (entityType) {
case QUEUE:
entityName = queueName;
break;
case SUBSCRIPTION:
entityName = topicName;
break;
case UNKNOWN:
entityName = connectionStringEntityName;
break;
default:
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("Unknown entity type: " + entityType));
}
return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
}
/**
* Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
* to a Service Bus queue or topic.
*
* @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
* @throws IllegalArgumentException if the entity type is not a queue or a topic.
*/
public ServiceBusSenderClient buildClient() {
return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
}
}
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
* entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link
* and {@link
* next session to process.
*
* <p>
* By default, the processor:
* <ul>
* <li>Automatically settles messages. Disabled via {@link
* <li>Processes 1 session concurrently. Configured via {@link
* <li>Invokes 1 instance of {@link
* {@link
* </ul>
*
* <p><strong>Instantiate a session-enabled processor client</strong></p>
* <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient
* <pre>
* Consumer<ServiceBusReceivedMessageContext> onMessage = context -> &
* ServiceBusReceivedMessage message = context.getMessage&
* System.out.printf&
* message.getSessionId&
* &
*
* Consumer<ServiceBusErrorContext> onError = context -> &
* System.out.printf&
* context.getFullyQualifiedNamespace&
*
* if &
* ServiceBusException exception = &
* System.out.printf&
* exception.getReason&
* &
* System.out.printf&
* &
* &
*
* &
*
* ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder&
* .connectionString&
* .sessionProcessor&
* .queueName&
* .maxConcurrentSessions&
* .processMessage&
* .processError&
* .buildProcessorClient&
*
* &
* sessionProcessor.start&
* </pre>
* <!-- end com.azure.messaging.servicebus.servicebusprocessorclient
*
* @see ServiceBusProcessorClient
*/
    public final class ServiceBusSessionProcessorClientBuilder {
        // Processor-level options (max concurrent calls, auto-complete, tracing) handed to the client at build time.
        private final ServiceBusProcessorClientOptions processorClientOptions;
        // Underlying session receiver builder; most setters on this builder delegate straight to it.
        private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
        // Required callbacks; buildProcessorClient() throws NullPointerException if either is unset.
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;
        private ServiceBusSessionProcessorClientBuilder() {
            sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
            // Defaults: one concurrent call and one concurrent session unless the caller overrides them.
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
            sessionReceiverClientBuilder.maxConcurrentSessions(1);
        }
        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
         * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }
        /**
         * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
         *
         * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
         */
        public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
            if (maxConcurrentSessions < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
            }
            sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
            return this;
        }
        /**
         * Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} and
         * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} modes the default value is 0.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
         * and before the application starts the processor.
         * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
         * Using a non-zero prefetch risks of losing messages even though it has better performance.
         * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-prefetch">Service Bus
         * prefetch</a>
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
            sessionReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }
        /**
         * Sets the name of the queue to create a processor for.
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
            sessionReceiverClientBuilder.queueName(queueName);
            return this;
        }
        /**
         * Sets the receive mode for the processor.
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            sessionReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }
        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @see #queueName(String)
         * @see SubQueue
         */
        public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
            this.sessionReceiverClientBuilder.subQueue(subQueue);
            return this;
        }
        /**
         * Sets the name of the subscription in the topic to listen to. <b>{@link #topicName(String)} must also be set.
         * </b>
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
            sessionReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }
        /**
         * Sets the name of the topic. <b>{@link #subscriptionName(String)} must also be set.</b>
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
            sessionReceiverClientBuilder.topicName(topicName);
            return this;
        }
        /**
         * The message processing callback for the processor that will be executed when a message is received.
         * @param processMessage The message processing consumer that will be executed when a message is received.
         *
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }
        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving messages.
         * @param processError The error handler which will be executed when an error occurs.
         *
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object
         */
        public ServiceBusSessionProcessorClientBuilder processError(
            Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }
        /**
         * Max concurrent messages that this processor should process.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         *
         * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
         */
        public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
         * {@link ServiceBusReceivedMessageContext#complete() completed}. If an error happens when
         * the message is processed, it is {@link ServiceBusReceivedMessageContext#abandon()
         * abandoned}.
         *
         * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
         */
        public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
            sessionReceiverClientBuilder.disableAutoComplete();
            // Both layers must agree: the receiver stops auto-settling and the processor stops expecting it.
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }
        /**
         * Creates a <b>session-aware</b> Service Bus processor responsible for reading
         * {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
         * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus connection
         * string contains an {@code EntityPath} that does not match the configured entity name, or if
         * {@link #subscriptionName(String) subscriptionName} is missing when a topic name is set.
         * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
         * queueName()} or {@link #topicName(String) topicName()}.
         * @throws NullPointerException if the {@link #processMessage(Consumer)} or {@link #processError(Consumer)}
         * callbacks are not set.
         */
        public ServiceBusProcessorClient buildProcessorClient() {
            return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
                sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
                sessionReceiverClientBuilder.subscriptionName,
                Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
                Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
        }
    }
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from a <b>session aware</b> Service Bus entity.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
    @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
    public final class ServiceBusSessionReceiverClientBuilder {
        // Auto-complete is on by default; the build methods force it off for RECEIVE_AND_DELETE and sync clients.
        private boolean enableAutoComplete = true;
        // null means "no roll-over"; the processor path sets this via maxConcurrentSessions(int).
        private Integer maxConcurrentSessions = null;
        private int prefetchCount = DEFAULT_PREFETCH_COUNT;
        private String queueName;
        private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
        private String subscriptionName;
        private String topicName;
        private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
        private SubQueue subQueue = SubQueue.NONE;
        private ServiceBusSessionReceiverClientBuilder() {
        }
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
         * {@link ServiceBusReceiverAsyncClient#complete(ServiceBusReceivedMessage) completed}. If an error happens when
         * the message is processed, it is {@link ServiceBusReceiverAsyncClient#abandon(ServiceBusReceivedMessage)
         * abandoned}.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
            this.enableAutoComplete = false;
            return this;
        }
        /**
         * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration#ZERO} or
         * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE}
         * mode, auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }
        /**
         * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
         *
         * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
         */
        ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
            if (maxConcurrentSessions < 1) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "maxConcurrentSessions cannot be less than 1."));
            }
            this.maxConcurrentSessions = maxConcurrentSessions;
            return this;
        }
        /**
         * Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} and
         * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} modes the default value is 0.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
         * and before the application asks for one using {@link ServiceBusReceiverAsyncClient#receiveMessages()}.
         * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch
         * off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code prefetchCount} is negative.
         */
        public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
            validateAndThrow(prefetchCount);
            this.prefetchCount = prefetchCount;
            return this;
        }
        /**
         * Sets the name of the queue to create a receiver for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }
        /**
         * Sets the receive mode for the receiver.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            this.receiveMode = receiveMode;
            return this;
        }
        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @see #queueName(String)
         * @see SubQueue
         */
        public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
            this.subQueue = subQueue;
            return this;
        }
        /**
         * Sets the name of the subscription in the topic to listen to. <b>{@link #topicName(String)} must also be set.
         * </b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
            this.subscriptionName = subscriptionName;
            return this;
        }
        /**
         * Sets the name of the topic. <b>{@link #subscriptionName(String)} must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }
        /**
         * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
         * ServiceBusMessage messages} from a specific queue or subscription. Used by the session processor; unlike
         * {@link #buildAsyncClient()} it honors the configured {@link #subQueue(SubQueue) subQueue} and wires in a
         * {@link ServiceBusSessionManager} for session roll-over.
         *
         * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
         * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus connection
         * string contains an {@code EntityPath} that does not match the configured entity name.
         * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
         * queueName()} or {@link #topicName(String) topicName()}.
         */
        ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                subQueue);
            // RECEIVE_AND_DELETE settles on receipt, so auto-complete and lock renewal are meaningless.
            if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete, null,
                maxConcurrentSessions);
            final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
                connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
            return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
                entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
        }
        /**
         * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
         * ServiceBusMessage messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
         * subscription.
         * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
         * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus connection
         * string contains an {@code EntityPath} that does not match the configured entity name.
         * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
         * queueName()} or {@link #topicName(String) topicName()}.
         */
        public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
            return buildAsyncClient(true);
        }
        /**
         * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
         * ServiceBusMessage messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
         * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus connection
         * string contains an {@code EntityPath} that does not match the configured entity name.
         * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
         * queueName()} or {@link #topicName(String) topicName()}.
         */
        public ServiceBusSessionReceiverClient buildClient() {
            // With prefetch off, the sync client pulls one message per request instead of draining a local buffer.
            final boolean isPrefetchDisabled = prefetchCount == 0;
            return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
                isPrefetchDisabled,
                MessageUtils.getTotalTimeout(retryOptions));
        }
        // Shared implementation for the sync and async session receivers; isAutoCompleteAllowed is false for the
        // sync path, where auto-complete is only supported through callback receive.
        private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            // NOTE(review): unlike buildAsyncClientForProcessor() this always passes SubQueue.NONE, ignoring any
            // value set via subQueue(SubQueue) — confirm that is intentional for session receivers.
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                SubQueue.NONE);
            if (!isAutoCompleteAllowed && enableAutoComplete) {
                LOGGER.warning(
                    "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
                enableAutoComplete = false;
            } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            // Messages are settled on receipt in this mode, so there is no lock to renew.
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
            return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
                entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
                ServiceBusClientBuilder.this::onClientClose);
        }
    }
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
* {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies
* the message processing callback when a message is received or the error handle when an error is observed. To
* create an instance, therefore, configuring the two callbacks - {@link
* {@link
* with auto-completion and auto-lock renewal capabilities.
*
* <p><strong>Sample code to instantiate a processor client</strong></p>
     * <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient#instantiation -->
     * <pre>
     * Consumer&lt;ServiceBusReceivedMessageContext&gt; onMessage = context -&gt; &#123;
     *     ServiceBusReceivedMessage message = context.getMessage&#40;&#41;;
     *     System.out.printf&#40;"Processing message. Sequence #: %s. Contents: %s%n",
     *         message.getSequenceNumber&#40;&#41;, message.getBody&#40;&#41;&#41;;
     * &#125;;
     *
     * Consumer&lt;ServiceBusErrorContext&gt; onError = context -&gt; &#123;
     *     System.out.printf&#40;"Error when receiving messages from namespace: '%s'. Entity: '%s'%n",
     *         context.getFullyQualifiedNamespace&#40;&#41;, context.getEntityPath&#40;&#41;&#41;;
     *
     *     if &#40;context.getException&#40;&#41; instanceof ServiceBusException&#41; &#123;
     *         ServiceBusException exception = &#40;ServiceBusException&#41; context.getException&#40;&#41;;
     *         System.out.printf&#40;"Error source: %s, reason %s%n", context.getErrorSource&#40;&#41;,
     *             exception.getReason&#40;&#41;&#41;;
     *     &#125; else &#123;
     *         System.out.printf&#40;"Error occurred: %s%n", context.getException&#40;&#41;&#41;;
     *     &#125;
     * &#125;;
     *
     * &#47;&#47; Retrieve 'connectionString' from your configuration.
     *
     * ServiceBusProcessorClient processor = new ServiceBusClientBuilder&#40;&#41;
     *     .connectionString&#40;connectionString&#41;
     *     .processor&#40;&#41;
     *     .queueName&#40;queueName&#41;
     *     .processMessage&#40;onMessage&#41;
     *     .processError&#40;onError&#41;
     *     .buildProcessorClient&#40;&#41;;
     *
     * &#47;&#47; Start the processor in the background.
     * processor.start&#40;&#41;;
     * </pre>
     * <!-- end com.azure.messaging.servicebus.servicebusprocessorclient#instantiation -->
*
* @see ServiceBusProcessorClient
*/
    public final class ServiceBusProcessorClientBuilder {
        // Underlying (non-session) receiver builder; most setters on this builder delegate straight to it.
        private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
        // Processor-level options (max concurrent calls, auto-complete, tracing) handed to the client at build time.
        private final ServiceBusProcessorClientOptions processorClientOptions;
        // Required callbacks; buildProcessorClient() throws NullPointerException if either is unset.
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;
        private ServiceBusProcessorClientBuilder() {
            serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
            // Default: one concurrent call unless the caller overrides it via maxConcurrentCalls(int).
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
        }
        /**
         * Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} and
         * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} modes the default value is 0.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
         * and before the application starts the processor.
         * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
            serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }
        /**
         * Sets the name of the queue to create a processor for.
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder queueName(String queueName) {
            serviceBusReceiverClientBuilder.queueName(queueName);
            return this;
        }
        /**
         * Sets the receive mode for the processor.
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            serviceBusReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }
        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see #queueName(String)
         * @see SubQueue
         */
        public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
            serviceBusReceiverClientBuilder.subQueue(subQueue);
            return this;
        }
        /**
         * Sets the name of the subscription in the topic to listen to. <b>{@link #topicName(String)} must also be set.
         * </b>
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
            serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }
        /**
         * Sets the name of the topic. <b>{@link #subscriptionName(String)} must also be set.</b>
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusProcessorClientBuilder topicName(String topicName) {
            serviceBusReceiverClientBuilder.topicName(topicName);
            return this;
        }
        /**
         * The message processing callback for the processor which will be executed when a message is received.
         * @param processMessage The message processing consumer that will be executed when a message is received.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }
        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving messages.
         * @param processError The error handler which will be executed when an error occurs.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object
         */
        public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }
        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
         * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }
        /**
         * Max concurrent messages that this processor should process. By default, this is set to 1.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
         */
        public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
         * {@link ServiceBusReceivedMessageContext#complete() completed}. If an error happens when
         * the message is processed, it is {@link ServiceBusReceivedMessageContext#abandon()
         * abandoned}.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder disableAutoComplete() {
            serviceBusReceiverClientBuilder.disableAutoComplete();
            // Both layers must agree: the receiver stops auto-settling and the processor stops expecting it.
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }
        /**
         * Creates Service Bus message processor responsible for reading {@link ServiceBusReceivedMessage
         * messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
         * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
         * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus connection
         * string contains an {@code EntityPath} that does not match the configured entity name, or if
         * {@link #subscriptionName(String) subscriptionName} is missing when a topic name is set.
         * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
         * queueName()} or {@link #topicName(String) topicName()}.
         * @throws NullPointerException if the {@link #processMessage(Consumer)} or {@link #processError(Consumer)}
         * callbacks are not set.
         */
        public ServiceBusProcessorClient buildProcessorClient() {
            return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
                serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
                serviceBusReceiverClientBuilder.subscriptionName,
                Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
                Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
        }
    }
/**
* Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
* messages from Service Bus.
*
* @see ServiceBusReceiverAsyncClient
* @see ServiceBusReceiverClient
*/
    @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
    public final class ServiceBusReceiverClientBuilder {
        // Auto-complete is on by default; the build methods force it off for RECEIVE_AND_DELETE and sync clients.
        private boolean enableAutoComplete = true;
        private int prefetchCount = DEFAULT_PREFETCH_COUNT;
        private String queueName;
        private SubQueue subQueue;
        private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
        private String subscriptionName;
        private String topicName;
        private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
        private ServiceBusReceiverClientBuilder() {
        }
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
         * {@link ServiceBusReceiverAsyncClient#complete(ServiceBusReceivedMessage) completed}. If an error happens when
         * the message is processed, it is {@link ServiceBusReceiverAsyncClient#abandon(ServiceBusReceivedMessage)
         * abandoned}.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder disableAutoComplete() {
            this.enableAutoComplete = false;
            return this;
        }
        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
         * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }
        /**
         * Sets the prefetch count of the receiver. For both {@link ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} and
         * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} modes the default value is 0.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
         * and before the application asks for one using {@link ServiceBusReceiverAsyncClient#receiveMessages()}.
         * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch
         * off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code prefetchCount} is negative.
         */
        public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
            validateAndThrow(prefetchCount);
            this.prefetchCount = prefetchCount;
            return this;
        }
        /**
         * Sets the name of the queue to create a receiver for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }
        /**
         * Sets the receive mode for the receiver.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            this.receiveMode = receiveMode;
            return this;
        }
        /**
         * Sets the type of the {@link SubQueue} to connect to.
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
            this.subQueue = subQueue;
            return this;
        }
        /**
         * Sets the name of the subscription in the topic to listen to. <b>{@link #topicName(String)} must also be set.
         * </b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
            this.subscriptionName = subscriptionName;
            return this;
        }
        /**
         * Sets the name of the topic. <b>{@link #subscriptionName(String)} must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusReceiverClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }
        /**
         * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage
         * messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
         * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus connection
         * string contains an {@code EntityPath} that does not match the configured entity name.
         * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
         * queueName()} or {@link #topicName(String) topicName()}.
         */
        public ServiceBusReceiverAsyncClient buildAsyncClient() {
            return buildAsyncClient(true);
        }
        /**
         * Creates <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage messages}
         * from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
         * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus connection
         * string contains an {@code EntityPath} that does not match the configured entity name.
         * @throws IllegalArgumentException Queue or topic name are not set via {@link #queueName(String)
         * queueName()} or {@link #topicName(String) topicName()}.
         */
        public ServiceBusReceiverClient buildClient() {
            // With prefetch off, the sync client pulls one message per request instead of draining a local buffer.
            final boolean isPrefetchDisabled = prefetchCount == 0;
            return new ServiceBusReceiverClient(buildAsyncClient(false),
                isPrefetchDisabled,
                MessageUtils.getTotalTimeout(retryOptions));
        }
        // Shared implementation for the sync and async receivers; isAutoCompleteAllowed is false for the sync path,
        // where auto-complete is only supported through callback receive.
        ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                subQueue);
            if (!isAutoCompleteAllowed && enableAutoComplete) {
                LOGGER.warning(
                    "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
                enableAutoComplete = false;
            } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            // Messages are settled on receipt in this mode, so there is no lock to renew.
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete);
            return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
                entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
        }
    }
private void validateAndThrow(int prefetchCount) {
if (prefetchCount < 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"prefetchCount (%s) cannot be less than 0.", prefetchCount)));
}
}
private void validateAndThrow(Duration maxLockRenewalDuration) {
if (maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"'maxLockRenewalDuration' cannot be negative."));
}
}
} |
Thanks for checking Zeija.. lgtm then! | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
}
return this;
} | if (customEndpointAddress == null) { | public ServiceBusClientBuilder customEndpointAddress(String customEndpointAddress) {
if (customEndpointAddress == null) {
this.customEndpointAddress = null;
return this;
}
try {
this.customEndpointAddress = new URL(customEndpointAddress);
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException(String.format("(%s) : is not a valid URL,", customEndpointAddress), e));
}
return this;
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
    // Default retry: service-recommended operation timeout applied to each attempt.
    private static final AmqpRetryOptions DEFAULT_RETRY =
        new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
    private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
    // Entity path format for topic subscriptions: {topic}/subscriptions/{subscription}.
    private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
    private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
    private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
    private static final int DEFAULT_PREFETCH_COUNT = 0;
    private static final String NAME_KEY = "name";
    private static final String VERSION_KEY = "version";
    private static final String UNKNOWN = "UNKNOWN";
    // Matches proxy addresses of the form "host:port" (e.g. "localhost:8888").
    private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
    private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
    // Guards lazy creation and disposal of the shared connection processor.
    private final Object connectionLock = new Object();
    private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
    private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
    private ClientOptions clientOptions;
    private Configuration configuration;
    // Single AMQP connection shared by every sender/receiver built from this builder.
    private ServiceBusConnectionProcessor sharedConnection;
    // Entity name captured from an entity-scoped connection string (EntityPath=...).
    private String connectionStringEntityName;
    private TokenCredential credentials;
    private String fullyQualifiedNamespace;
    private ProxyOptions proxyOptions;
    private AmqpRetryOptions retryOptions;
    private Scheduler scheduler;
    private AmqpTransportType transport = AmqpTransportType.AMQP;
    private SslDomain.VerifyMode verifyMode;
    private boolean crossEntityTransactions;
    private URL customEndpointAddress;
    /**
     * Keeps track of the open clients that were created from this builder when there is a shared connection.
     */
    private final AtomicInteger openClients = new AtomicInteger();
    /**
     * Creates a new instance with the default transport {@link AmqpTransportType#AMQP}.
     */
    public ServiceBusClientBuilder() {
    }
    /**
     * Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
     * certain properties, as well as support the addition of custom header information. Refer to the {@link
     * ClientOptions} documentation for more information.
     *
     * @param clientOptions to be set on the client.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
        // No defensive copy; the options object is treated as caller-owned configuration.
        this.clientOptions = clientOptions;
        return this;
    }
    /**
     * Sets the fully-qualified namespace for the Service Bus.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     * @throws NullPointerException if {@code fullyQualifiedNamespace} is null.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
     */
    public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
        // Null and empty are rejected separately: NPE for null, IAE for "".
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        return this;
    }
private String getAndValidateFullyQualifiedNamespace() {
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return fullyQualifiedNamespace;
}
/**
* Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
* does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
 * an intermediary. For example: {@literal https://my.custom.endpoint.com:55300}.
 * <p>
 * If no port is specified, the default port for the {@link
 * transportType(AmqpTransportType) transport type} is used.
*
* @param customEndpointAddress The custom endpoint address.
* @return The updated {@link ServiceBusClientBuilder} object.
* @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
*/
    /**
     * Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
     *
     * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     * @throws AzureException if the shared-key credential embedded in the connection string cannot be created.
     */
    public ServiceBusClientBuilder connectionString(String connectionString) {
        final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        final TokenCredential tokenCredential;
        try {
            tokenCredential = getTokenCredential(properties);
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(
                new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
        }
        this.fullyQualifiedNamespace = properties.getEndpoint().getHost();
        String entityPath = properties.getEntityPath();
        // An entity-scoped connection string (EntityPath=...) pins this builder to that entity;
        // validateEntityPaths(...) later cross-checks it against queueName()/topicName().
        if (!CoreUtils.isNullOrEmpty(entityPath)) {
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .log("Setting entity from connection string.");
            this.connectionStringEntityName = entityPath;
        }
        return credential(properties.getEndpoint().getHost(), tokenCredential);
    }
/**
* Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
* scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
* one 'send-via' entity on server side as explained next.
* Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
* entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
* perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
* their first operation need to either be senders, or if they are receivers they need to be on the same entity as
* the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
* that the transaction is committed because it cannot route a receive operation through a different entity). For
* instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
* cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
* first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
*
* <p><strong>Avoid using non-transaction API on this client</strong></p>
* Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
* have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
* entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
*
* <p><strong>When not to enable this feature</strong></p>
* If your transaction is involved in one Service bus entity only. For example you are receiving from one
* queue/subscription and you want to settle your own messages which are part of one transaction.
*
* @return The updated {@link ServiceBusSenderClientBuilder} object.
*
* @see <a href="https:
*/
    public ServiceBusClientBuilder enableCrossEntityTransactions() {
        // Flag is forwarded to ServiceBusReactorAmqpConnection so all sends can be routed
        // through a single 'send-via' entity, letting one transaction span multiple entities.
        this.crossEntityTransactions = true;
        return this;
    }
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
TokenCredential tokenCredential;
if (properties.getSharedAccessSignature() == null) {
tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY);
} else {
tokenCredential = new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
}
return tokenCredential;
}
    /**
     * Sets the configuration store that is used during construction of the service client.
     *
     * If not specified, the default configuration store is used to configure Service Bus clients. Use {@link
     * Configuration#NONE} to bypass using configuration settings during construction.
     *
     * @param configuration The configuration store used to configure Service Bus clients.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder configuration(Configuration configuration) {
        // When left null, getConnectionOptions() lazily clones the global configuration.
        this.configuration = configuration;
        return this;
    }
/**
* Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
* <a href="https:
* azure-identity</a> has multiple {@link TokenCredential} implementations that can be used to authenticate
* the access to the Service Bus resource.
*
* @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
* @param credential The token credential to use for authentication. Access controls may be specified by the
* ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
*
* @return The updated {@link ServiceBusClientBuilder} object.
*/
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
return this;
}
    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
     * identity and authentication documentation for more details on proper usage of the
     * {@link TokenCredential} type.
     *
     * @param credential The token credential to use for authentication. Access controls may be specified by the
     * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     * @throws NullPointerException if {@code credential} is null.
     */
    @Override
    public ServiceBusClientBuilder credential(TokenCredential credential) {
        // The namespace must be supplied separately via fullyQualifiedNamespace(String).
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }
    /**
     * Sets the credential with the shared access policies for the Service Bus resource.
     * You can find the shared access policies on the azure portal or Azure CLI.
     * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
     * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
     * can be either 'Primary Key' or 'Secondary Key'.
     * This method and {@link AzureNamedKeyCredential#update(String, String)} allow
     * you to update the name and key.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
     */
    public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        // Wrap the named key in the internal SAS-generating credential with the default token validity.
        this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
        return this;
    }
    /**
     * Sets the credential with the shared access policies for the Service Bus resource.
     * You can find the shared access policies on the azure portal or Azure CLI.
     * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
     * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
     * can be either 'Primary Key' or 'Secondary Key'.
     * This method and {@link AzureNamedKeyCredential#update(String, String)} allow
     * you to update the name and key.
     *
     * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     * @throws NullPointerException if {@code credential} is null.
     */
    @Override
    public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Same wrapping as the namespace overload; authorization happens via SAS tokens.
        this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
        return this;
    }
    /**
     * Sets the credential with Shared Access Signature for the Service Bus resource.
     * Refer to <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-sas">
     * Service Bus access control with Shared Access Signatures</a>.
     *
     * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
     * @param credential {@link AzureSasCredential} to be used for authentication.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
     */
    public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        // The pre-computed SAS is used directly; no key-based token generation takes place.
        this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
        return this;
    }
    /**
     * Sets the credential with Shared Access Signature for the Service Bus resource.
     * Refer to <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-sas">
     * Service Bus access control with Shared Access Signatures</a>.
     *
     * @param credential {@link AzureSasCredential} to be used for authentication.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     * @throws NullPointerException if {@code credential} is null.
     */
    @Override
    public ServiceBusClientBuilder credential(AzureSasCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Wrapped so that getConnectionOptions() selects SHARED_ACCESS_SIGNATURE authorization.
        this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
        return this;
    }
    /**
     * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured, {@link
     * AmqpTransportType#AMQP_WEB_SOCKETS} must be used for the transport type.
     *
     * @param proxyOptions The proxy configuration to use.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
        // getConnectionOptions() rejects a configured proxy unless transport is AMQP_WEB_SOCKETS.
        this.proxyOptions = proxyOptions;
        return this;
    }
    /**
     * Package-private method that sets the verify mode for this connection.
     *
     * @param verifyMode The verification mode.
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
        // Defaults to VERIFY_PEER_NAME in getConnectionOptions() when left unset.
        this.verifyMode = verifyMode;
        return this;
    }
    /**
     * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
     *
     * @param retryOptions The retry options to use.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
        // null is replaced by DEFAULT_RETRY when the shared connection is created.
        this.retryOptions = retryOptions;
        return this;
    }
    /**
     * Sets the scheduler to use.
     *
     * @param scheduler Scheduler to be used.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    ServiceBusClientBuilder scheduler(Scheduler scheduler) {
        // null is replaced with an elastic scheduler when the shared connection is created.
        this.scheduler = scheduler;
        return this;
    }
    /**
     * Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is {@link
     * AmqpTransportType#AMQP}.
     *
     * @param transportType The transport type to use.
     *
     * @return The updated {@link ServiceBusClientBuilder} object.
     */
    @Override
    public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
        this.transport = transportType;
        return this;
    }
    /**
     * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
     *
     * @return A new instance of {@link ServiceBusSenderClientBuilder}.
     */
    public ServiceBusSenderClientBuilder sender() {
        // Inner builder; clients built from it share this builder's connection settings.
        return new ServiceBusSenderClientBuilder();
    }
    /**
     * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
     *
     * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
     */
    public ServiceBusReceiverClientBuilder receiver() {
        // Inner builder; clients built from it share this builder's connection settings.
        return new ServiceBusReceiverClientBuilder();
    }
    /**
     * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service
     * Bus message receivers.
     *
     * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
     */
    public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
        // Inner builder; clients built from it share this builder's connection settings.
        return new ServiceBusSessionReceiverClientBuilder();
    }
    /**
     * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient}
     * instance.
     *
     * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
     */
    public ServiceBusProcessorClientBuilder processor() {
        // Inner builder; clients built from it share this builder's connection settings.
        return new ServiceBusProcessorClientBuilder();
    }
    /**
     * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
     * instance that processes sessions.
     * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
     */
    public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
        // Inner builder; clients built from it share this builder's connection settings.
        return new ServiceBusSessionProcessorClientBuilder();
    }
    /**
     * Called when a child client is closed. Disposes of the shared connection if there are no more clients.
     */
    void onClientClose() {
        // connectionLock serializes the decrement-and-maybe-dispose sequence against
        // concurrent connection creation in getOrCreateConnectionProcessor.
        synchronized (connectionLock) {
            final int numberOfOpenClients = openClients.decrementAndGet();
            LOGGER.atInfo()
                .addKeyValue("numberOfOpenClients", numberOfOpenClients)
                .log("Closing a dependent client.");
            if (numberOfOpenClients > 0) {
                return;
            }
            // A negative count means close() was invoked more times than clients were created.
            if (numberOfOpenClients < 0) {
                LOGGER.atWarning()
                    .addKeyValue("numberOfOpenClients", numberOfOpenClients)
                    .log("There should not be less than 0 clients.");
            }
            LOGGER.info("No more open clients, closing shared connection.");
            if (sharedConnection != null) {
                sharedConnection.dispose();
                // Reset so a later build() can create a fresh connection.
                sharedConnection = null;
            } else {
                LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
            }
        }
    }
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
if (retryOptions == null) {
retryOptions = DEFAULT_RETRY;
}
if (scheduler == null) {
scheduler = Schedulers.elastic();
}
synchronized (connectionLock) {
if (sharedConnection == null) {
final ConnectionOptions connectionOptions = getConnectionOptions();
final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
final String connectionId = StringUtil.getRandomString("MF");
final ReactorProvider provider = new ReactorProvider();
final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
connectionOptions.getAuthorizationScope());
return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
crossEntityTransactions);
}).repeat();
sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
}
}
final int numberOfOpenClients = openClients.incrementAndGet();
LOGGER.info("
return sharedConnection;
}
    /** Assembles the immutable connection options from the builder's current state. */
    private ConnectionOptions getConnectionOptions() {
        // Lazily snapshot the global configuration so environment-backed settings are honored.
        configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
        if (credentials == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
                + "They can be set using: connectionString(String), connectionString(String, String), "
                + "or credentials(String, String, TokenCredential)"
            ));
        }
        // A proxy can only be traversed over web sockets; plain AMQP cannot tunnel through it.
        if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
            && transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "Cannot use a proxy when TransportType is not AMQP."));
        }
        if (proxyOptions == null) {
            proxyOptions = getDefaultProxyConfiguration(configuration);
        }
        // Shared-key credentials authorize via SAS; anything else is treated as an AAD JWT.
        final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
            ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
            : CbsAuthorizationType.JSON_WEB_TOKEN;
        final SslDomain.VerifyMode verificationMode = verifyMode != null
            ? verifyMode
            : SslDomain.VerifyMode.VERIFY_PEER_NAME;
        final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
        // Product name/version come from the SDK's properties file for user-agent telemetry.
        final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
        final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
        final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
        if (customEndpointAddress == null) {
            return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
                ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
                options, verificationMode, product, clientVersion);
        } else {
            // Custom endpoint: connect to the user-supplied host/port while keeping the real
            // namespace for authorization and addressing.
            return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
                ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
                options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
                customEndpointAddress.getPort());
        }
    }
    /** Derives proxy options from the HTTP_PROXY configuration when none were set explicitly. */
    private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
        // Preserve the caller's authentication scheme if partial proxy options were supplied.
        ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
        if (proxyOptions != null) {
            authentication = proxyOptions.getAuthentication();
        }
        String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
        if (CoreUtils.isNullOrEmpty(proxyAddress)) {
            return ProxyOptions.SYSTEM_DEFAULTS;
        }
        return getProxyOptions(authentication, proxyAddress, configuration,
            Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
    }
    /**
     * Resolves explicit "host:port" proxy addresses, or defers to azure-core's system-proxy
     * discovery when 'java.net.useSystemProxies' is enabled; otherwise ignores the setting.
     */
    private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
        Configuration configuration, boolean useSystemProxies) {
        String host;
        int port;
        if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
            // Address is of the form "host:port"; build an explicit HTTP proxy from it.
            final String[] hostPort = proxyAddress.split(":");
            host = hostPort[0];
            port = Integer.parseInt(hostPort[1]);
            final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
            final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
            final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
            return new ProxyOptions(authentication, proxy, username, password);
        } else if (useSystemProxies) {
            // Delegate discovery to azure-core and adapt its proxy options to the AMQP type.
            com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
                .fromConfiguration(configuration);
            return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
                coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
        } else {
            LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
                + "set or was false.");
            return ProxyOptions.SYSTEM_DEFAULTS;
        }
    }
private static boolean isNullOrEmpty(String item) {
return item == null || item.isEmpty();
}
    /**
     * Validates that exactly one entity source is configured and classifies it.
     * Queue and topic are mutually exclusive; an entity named by the connection string must
     * match whichever of the two was set explicitly.
     */
    private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
        String topicName, String queueName) {
        final boolean hasTopicName = !isNullOrEmpty(topicName);
        final boolean hasQueueName = !isNullOrEmpty(queueName);
        final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);
        final MessagingEntityType entityType;
        if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
                "Cannot build client without setting either a queueName or topicName."));
        } else if (hasQueueName && hasTopicName) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
        } else if (hasQueueName) {
            // Entity-scoped connection string must agree with the explicitly configured queue.
            if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "queueName (%s) is different than the connectionString's EntityPath (%s).",
                    queueName, connectionStringEntityName)));
            }
            entityType = MessagingEntityType.QUEUE;
        } else if (hasTopicName) {
            if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "topicName (%s) is different than the connectionString's EntityPath (%s).",
                    topicName, connectionStringEntityName)));
            }
            entityType = MessagingEntityType.SUBSCRIPTION;
        } else {
            // Only the connection string names an entity; its kind cannot be determined here.
            entityType = MessagingEntityType.UNKNOWN;
        }
        return entityType;
    }
    /**
     * Builds the AMQP entity path for the configured entity, appending the dead-letter or
     * transfer-dead-letter suffix when a sub-queue was requested.
     */
    private static String getEntityPath(MessagingEntityType entityType, String queueName,
        String topicName, String subscriptionName, SubQueue subQueue) {
        String entityPath;
        switch (entityType) {
            case QUEUE:
                entityPath = queueName;
                break;
            case SUBSCRIPTION:
                if (isNullOrEmpty(subscriptionName)) {
                    throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                        "topicName (%s) must have a subscriptionName associated with it.", topicName)));
                }
                // Subscriptions address as "{topic}/subscriptions/{subscription}".
                entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
                    subscriptionName);
                break;
            default:
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Unknown entity type: " + entityType));
        }
        if (subQueue == null) {
            return entityPath;
        }
        switch (subQueue) {
            case NONE:
                break;
            case TRANSFER_DEAD_LETTER_QUEUE:
                entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
                break;
            case DEAD_LETTER_QUEUE:
                entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX;
                break;
            default:
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: "
                    + subQueue));
        }
        return entityPath;
    }
    /**
     * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
     * to Service Bus.
     *
     * @see ServiceBusSenderAsyncClient
     * @see ServiceBusSenderClient
     */
    @ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
    public final class ServiceBusSenderClientBuilder {
        private String queueName;
        private String topicName;
        private ServiceBusSenderClientBuilder() {
        }
        /**
         * Sets the name of the Service Bus queue to publish messages to.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusSenderClientBuilder} object.
         */
        public ServiceBusSenderClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }
        /**
         * Sets the name of the Service Bus topic to publish messages to.
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusSenderClientBuilder} object.
         */
        public ServiceBusSenderClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }
        /**
         * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
         * ServiceBusMessage} to a Service Bus queue or topic.
         *
         * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
         * @throws IllegalStateException if neither {@link #queueName(String) queueName} nor
         * {@link #topicName(String) topicName} are set or, both of these fields are set. It is also thrown
         * when the connection string's EntityPath does not match the configured entity.
         * @throws IllegalArgumentException if the entity type is not a queue or a topic.
         */
        public ServiceBusSenderAsyncClient buildAsyncClient() {
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityName;
            switch (entityType) {
                case QUEUE:
                    entityName = queueName;
                    break;
                case SUBSCRIPTION:
                    entityName = topicName;
                    break;
                case UNKNOWN:
                    // Entity came from the connection string rather than an explicit setter.
                    entityName = connectionStringEntityName;
                    break;
                default:
                    throw LOGGER.logExceptionAsError(
                        new IllegalArgumentException("Unknown entity type: " + entityType));
            }
            return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
        }
        /**
         * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
         * to a Service Bus queue or topic.
         *
         * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
         * @throws IllegalStateException if neither {@link #queueName(String) queueName} nor
         * {@link #topicName(String) topicName} are set or, both of these fields are set. It is also thrown
         * when the connection string's EntityPath does not match the configured entity.
         * @throws IllegalArgumentException if the entity type is not a queue or a topic.
         */
        public ServiceBusSenderClient buildClient() {
            // Synchronous client wraps the async one, with a total timeout derived from retry options.
            return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
        }
    }
/**
* Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
* entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link
* and {@link
* next session to process.
*
* <p>
* By default, the processor:
* <ul>
* <li>Automatically settles messages. Disabled via {@link
* <li>Processes 1 session concurrently. Configured via {@link
* <li>Invokes 1 instance of {@link
* {@link
* </ul>
*
* <p><strong>Instantiate a session-enabled processor client</strong></p>
* <!-- src_embed com.azure.messaging.servicebus.servicebusprocessorclient
* <pre>
* Consumer<ServiceBusReceivedMessageContext> onMessage = context -> &
* ServiceBusReceivedMessage message = context.getMessage&
* System.out.printf&
* message.getSessionId&
* &
*
* Consumer<ServiceBusErrorContext> onError = context -> &
* System.out.printf&
* context.getFullyQualifiedNamespace&
*
* if &
* ServiceBusException exception = &
* System.out.printf&
* exception.getReason&
* &
* System.out.printf&
* &
* &
*
* &
*
* ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder&
* .connectionString&
* .sessionProcessor&
* .queueName&
* .maxConcurrentSessions&
* .processMessage&
* .processError&
* .buildProcessorClient&
*
* &
* sessionProcessor.start&
* </pre>
* <!-- end com.azure.messaging.servicebus.servicebusprocessorclient
*
* @see ServiceBusProcessorClient
*/
public final class ServiceBusSessionProcessorClientBuilder {
        private final ServiceBusProcessorClientOptions processorClientOptions;
        private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
        // Callbacks invoked for each received message and for each error surfaced by the processor.
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;
        private ServiceBusSessionProcessorClientBuilder() {
            sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
            // Defaults: one concurrent callback invocation and one session processed at a time.
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
            sessionReceiverClientBuilder.maxConcurrentSessions(1);
        }
/**
* Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration
* disables auto-renewal. For {@link ServiceBusReceiveMode
* auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration
* or {@code null} indicates that auto-renewal is disabled.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
}
sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
return this;
}
/**
* Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application starts the processor.
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
* Using a non-zero prefetch risks of losing messages even though it has better performance.
* @see <a href="https:
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
sessionReceiverClientBuilder.prefetchCount(prefetchCount);
return this;
}
/**
* Sets the name of the queue to create a processor for.
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
sessionReceiverClientBuilder.queueName(queueName);
return this;
}
/**
* Sets the receive mode for the processor.
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
sessionReceiverClientBuilder.receiveMode(receiveMode);
return this;
}
/**
* Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
* secondary sub-queue, called a dead-letter queue (DLQ).
*
* @param subQueue The type of the sub queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
* @see SubQueue
*/
public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
this.sessionReceiverClientBuilder.subQueue(subQueue);
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
sessionReceiverClientBuilder.subscriptionName(subscriptionName);
return this;
}
/**
* Sets the name of the topic. <b>{@link
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
sessionReceiverClientBuilder.topicName(topicName);
return this;
}
/**
* The message processing callback for the processor that will be executed when a message is received.
* @param processMessage The message processing consumer that will be executed when a message is received.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder processMessage(
Consumer<ServiceBusReceivedMessageContext> processMessage) {
this.processMessage = processMessage;
return this;
}
/**
* The error handler for the processor which will be invoked in the event of an error while receiving messages.
* @param processError The error handler which will be executed when an error occurs.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object
*/
public ServiceBusSessionProcessorClientBuilder processError(
Consumer<ServiceBusErrorContext> processError) {
this.processError = processError;
return this;
}
/**
* Max concurrent messages that this processor should process.
*
* @param maxConcurrentCalls max concurrent messages that this processor should process.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
if (maxConcurrentCalls < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
return this;
}
/**
* Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
* {@link ServiceBusReceivedMessageContext
* the message is processed, it is {@link ServiceBusReceivedMessageContext
* abandoned}.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
sessionReceiverClientBuilder.disableAutoComplete();
processorClientOptions.setDisableAutoComplete(true);
return this;
}
/**
* Creates a <b>session-aware</b> Service Bus processor responsible for reading
* {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
* @throws NullPointerException if the {@link
* callbacks are not set.
*/
public ServiceBusProcessorClient buildProcessorClient() {
return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
sessionReceiverClientBuilder.subscriptionName,
Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
}
}
    /**
     * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
     * messages from a <b>session aware</b> Service Bus entity.
     *
     * @see ServiceBusReceiverAsyncClient
     * @see ServiceBusReceiverClient
     */
    @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
    public final class ServiceBusSessionReceiverClientBuilder {
        // Auto-complete is on by default; build methods may force it off (see buildAsyncClient).
        private boolean enableAutoComplete = true;
        // null means "no roll-over": the session processor path supplies a value via maxConcurrentSessions(int).
        private Integer maxConcurrentSessions = null;
        private int prefetchCount = DEFAULT_PREFETCH_COUNT;
        private String queueName;
        private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
        private String subscriptionName;
        private String topicName;
        private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
        private SubQueue subQueue = SubQueue.NONE;
        private ServiceBusSessionReceiverClientBuilder() {
        }
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message
         * is completed, and a message whose processing throws is abandoned.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
            this.enableAutoComplete = false;
            return this;
        }
        /**
         * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration#ZERO} or
         * {@code null} disables auto-renewal. In RECEIVE_AND_DELETE receive mode, auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }
        /**
         * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. Package-private:
         * exposed publicly only through the session processor builder.
         *
         * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
         */
        ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
            if (maxConcurrentSessions < 1) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "maxConcurrentSessions cannot be less than 1."));
            }
            this.maxConcurrentSessions = maxConcurrentSessions;
            return this;
        }
        /**
         * Sets the prefetch count of the receiver. Prefetch speeds up message flow by keeping messages locally
         * available before the application asks for one. Setting a non-zero value prefetches that number of
         * messages; zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code prefetchCount} is negative.
         */
        public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
            validateAndThrow(prefetchCount);
            this.prefetchCount = prefetchCount;
            return this;
        }
        /**
         * Sets the name of the queue to create a receiver for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }
        /**
         * Sets the receive mode for the receiver.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            this.receiveMode = receiveMode;
            return this;
        }
        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
            this.subQueue = subQueue;
            return this;
        }
        /**
         * Sets the name of the subscription in the topic to listen to. The topic name must also be set.
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
            this.subscriptionName = subscriptionName;
            return this;
        }
        /**
         * Sets the name of the topic. The subscription name must also be set.
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }
        /**
         * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver for the processor client. Unlike
         * {@link #buildAsyncClient()}, this honors the configured {@code subQueue} and creates a
         * {@code ServiceBusSessionManager} that handles session roll-over.
         *
         * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if the {@code queueName} or {@code topicName} are not set or if both of these
         * fields are set. It is also thrown if the Service Bus connection information has not been configured.
         * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
         * {@code topicName()}.
         */
        ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                subQueue);
            // Auto-complete is meaningless when the broker already deletes messages on receive.
            if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            // No lock to renew in RECEIVE_AND_DELETE mode.
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete, null,
                maxConcurrentSessions);
            final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
                connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
            return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
                entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
        }
        /**
         * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
         * ServiceBusMessage messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
         * subscription.
         * @throws IllegalStateException if the {@code queueName} or {@code topicName} are not set or if both of these
         * fields are set. It is also thrown if the Service Bus connection information has not been configured.
         * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
         * {@code topicName()}.
         */
        public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
            return buildAsyncClient(true);
        }
        /**
         * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
         * ServiceBusMessage messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusSessionReceiverClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if the {@code queueName} or {@code topicName} are not set or if both of these
         * fields are set. It is also thrown if the Service Bus connection information has not been configured.
         * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
         * {@code topicName()}.
         */
        public ServiceBusSessionReceiverClient buildClient() {
            final boolean isPrefetchDisabled = prefetchCount == 0;
            return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
                isPrefetchDisabled,
                MessageUtils.getTotalTimeout(retryOptions));
        }
        /**
         * Shared build path for the sync and async session receivers.
         *
         * @param isAutoCompleteAllowed {@code false} when building for the synchronous client, which does not
         * support auto-complete outside of callback receive.
         * @return a configured {@link ServiceBusSessionReceiverAsyncClient}.
         */
        private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                SubQueue.NONE);
            if (!isAutoCompleteAllowed && enableAutoComplete) {
                LOGGER.warning(
                    "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
                enableAutoComplete = false;
            } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            // No lock to renew in RECEIVE_AND_DELETE mode.
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
            return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
                entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
                ServiceBusClientBuilder.this::onClientClose);
        }
    }
    /**
     * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
     * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies
     * the message processing callback when a message is received or the error handler when an error is observed.
     * Configure the two callbacks via {@link #processMessage(Consumer)} and {@link #processError(Consumer)}. By
     * default the processor supports auto-completion and auto-lock renewal.
     *
     * @see ServiceBusProcessorClient
     */
    public final class ServiceBusProcessorClientBuilder {
        // Underlying (non-session) receiver builder; most entity/receive settings delegate to it.
        private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
        // Processor-level options (concurrency, auto-complete, tracing) shared with the built client.
        private final ServiceBusProcessorClientOptions processorClientOptions;
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        private Consumer<ServiceBusErrorContext> processError;
        /**
         * Creates the builder with the processor default of 1 concurrent call.
         */
        private ServiceBusProcessorClientBuilder() {
            serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
        }
        /**
         * Sets the prefetch count of the processor. Prefetch speeds up message flow by keeping messages locally
         * available before the application starts the processor. Setting a non-zero value prefetches that number of
         * messages; zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
            serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }
        /**
         * Sets the name of the queue to create a processor for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder queueName(String queueName) {
            serviceBusReceiverClientBuilder.queueName(queueName);
            return this;
        }
        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            serviceBusReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }
        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
            serviceBusReceiverClientBuilder.subQueue(subQueue);
            return this;
        }
        /**
         * Sets the name of the subscription in the topic to listen to. The topic name must also be set.
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
            serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }
        /**
         * Sets the name of the topic. The subscription name must also be set.
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder topicName(String topicName) {
            serviceBusReceiverClientBuilder.topicName(topicName);
            return this;
        }
        /**
         * The message processing callback for the processor which will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is received.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }
        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object
         */
        public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }
        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
         * disables auto-renewal. In RECEIVE_AND_DELETE receive mode, auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }
        /**
         * Max concurrent messages that this processor should process. By default, this is set to 1.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
         */
        public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message
         * is completed, and a message the callback throws on is abandoned.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder disableAutoComplete() {
            // Both the receiver and the processor options must agree on the auto-complete setting.
            serviceBusReceiverClientBuilder.disableAutoComplete();
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }
        /**
         * Creates Service Bus message processor responsible for reading {@link ServiceBusReceivedMessage
         * messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
         * @throws IllegalStateException if the {@code queueName} or {@code topicName} are not set or if both of these
         * fields are set. It is also thrown if the Service Bus connection information has not been configured.
         * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
         * {@code topicName()}.
         * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not set.
         */
        public ServiceBusProcessorClient buildProcessorClient() {
            return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
                serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
                serviceBusReceiverClientBuilder.subscriptionName,
                Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
                Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
        }
    }
    /**
     * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
     * messages from Service Bus.
     *
     * @see ServiceBusReceiverAsyncClient
     * @see ServiceBusReceiverClient
     */
    @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
    public final class ServiceBusReceiverClientBuilder {
        // Auto-complete is on by default; build methods may force it off (see buildAsyncClient).
        private boolean enableAutoComplete = true;
        private int prefetchCount = DEFAULT_PREFETCH_COUNT;
        private String queueName;
        private SubQueue subQueue;
        private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
        private String subscriptionName;
        private String topicName;
        private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
        private ServiceBusReceiverClientBuilder() {
        }
        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message
         * is completed, and a message whose processing throws is abandoned.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder disableAutoComplete() {
            this.enableAutoComplete = false;
            return this;
        }
        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
         * disables auto-renewal. In RECEIVE_AND_DELETE receive mode, auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }
        /**
         * Sets the prefetch count of the receiver. Prefetch speeds up message flow by keeping messages locally
         * available before the application asks for one. Setting a non-zero value prefetches that number of
         * messages; zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code prefetchCount} is negative.
         */
        public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
            validateAndThrow(prefetchCount);
            this.prefetchCount = prefetchCount;
            return this;
        }
        /**
         * Sets the name of the queue to create a receiver for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }
        /**
         * Sets the receive mode for the receiver.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            this.receiveMode = receiveMode;
            return this;
        }
        /**
         * Sets the type of the {@link SubQueue} to connect to.
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
            this.subQueue = subQueue;
            return this;
        }
        /**
         * Sets the name of the subscription in the topic to listen to. The topic name must also be set.
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
            this.subscriptionName = subscriptionName;
            return this;
        }
        /**
         * Sets the name of the topic. The subscription name must also be set.
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }
        /**
         * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage
         * messages} from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if the {@code queueName} or {@code topicName} are not set or if both of these
         * fields are set. It is also thrown if the Service Bus connection information has not been configured.
         * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
         * {@code topicName()}.
         */
        public ServiceBusReceiverAsyncClient buildAsyncClient() {
            return buildAsyncClient(true);
        }
        /**
         * Creates <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage messages}
         * from a specific queue or subscription.
         *
         * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if the {@code queueName} or {@code topicName} are not set or if both of these
         * fields are set. It is also thrown if the Service Bus connection information has not been configured.
         * @throws IllegalArgumentException Queue or topic name are not set via {@code queueName()} or
         * {@code topicName()}.
         */
        public ServiceBusReceiverClient buildClient() {
            final boolean isPrefetchDisabled = prefetchCount == 0;
            return new ServiceBusReceiverClient(buildAsyncClient(false),
                isPrefetchDisabled,
                MessageUtils.getTotalTimeout(retryOptions));
        }
        /**
         * Shared build path for the sync and async receivers.
         *
         * @param isAutoCompleteAllowed {@code false} when building for the synchronous client, which does not
         * support auto-complete outside of callback receive.
         * @return a configured {@link ServiceBusReceiverAsyncClient}.
         */
        ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                subQueue);
            if (!isAutoCompleteAllowed && enableAutoComplete) {
                LOGGER.warning(
                    "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
                enableAutoComplete = false;
            } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            // No lock to renew in RECEIVE_AND_DELETE mode.
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete);
            return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
                entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
        }
    }
/**
 * Ensures the supplied prefetch count is not negative.
 *
 * @param prefetchCount Value to validate.
 * @throws IllegalArgumentException If {@code prefetchCount} is negative.
 */
private void validateAndThrow(int prefetchCount) {
    if (prefetchCount >= 0) {
        return;
    }
    throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
        "prefetchCount (%s) cannot be less than 0.", prefetchCount)));
}
/**
 * Ensures the supplied lock renewal duration, when present, is not negative.
 *
 * @param maxLockRenewalDuration Duration to validate; {@code null} is allowed and means "not set".
 * @throws IllegalArgumentException If the duration is negative.
 */
private void validateAndThrow(Duration maxLockRenewalDuration) {
    final boolean isNegative = maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative();
    if (isNegative) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    }
}
} | class ServiceBusClientBuilder implements
TokenCredentialTrait<ServiceBusClientBuilder>,
AzureNamedKeyCredentialTrait<ServiceBusClientBuilder>,
ConnectionStringTrait<ServiceBusClientBuilder>,
AzureSasCredentialTrait<ServiceBusClientBuilder>,
AmqpTrait<ServiceBusClientBuilder>,
ConfigurationTrait<ServiceBusClientBuilder> {
// Default retry: standard AmqpRetryOptions with the Service Bus operation timeout as the try timeout.
private static final AmqpRetryOptions DEFAULT_RETRY =
    new AmqpRetryOptions().setTryTimeout(ServiceBusConstants.OPERATION_TIMEOUT);
// Resource bundle carrying this library's name/version, used to populate connection properties.
private static final String SERVICE_BUS_PROPERTIES_FILE = "azure-messaging-servicebus.properties";
// Entity path template for topic subscriptions: "<topic>/subscriptions/<subscription>".
private static final String SUBSCRIPTION_ENTITY_PATH_FORMAT = "%s/subscriptions/%s";
// Suffixes appended to an entity path to address its dead-letter sub-queues.
private static final String DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$deadletterqueue";
private static final String TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX = "/$Transfer/$deadletterqueue";
// Prefetch is off by default unless the caller opts in.
private static final int DEFAULT_PREFETCH_COUNT = 0;
// Keys read from the properties file above.
private static final String NAME_KEY = "name";
private static final String VERSION_KEY = "version";
private static final String UNKNOWN = "UNKNOWN";
// Matches proxy addresses of the form "host:port" (host portion contains no colon).
private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");
// Default window for auto lock renewal when the user does not configure one.
private static final Duration MAX_LOCK_RENEW_DEFAULT_DURATION = Duration.ofMinutes(5);
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusClientBuilder.class);
// Guards creation and disposal of the shared connection below.
private final Object connectionLock = new Object();
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));
// Mutable builder state, populated by the fluent setters.
private ClientOptions clientOptions;
private Configuration configuration;
// Single AMQP connection shared by every client built from this builder.
private ServiceBusConnectionProcessor sharedConnection;
// Entity name ("EntityPath") parsed from the connection string, if it contained one.
private String connectionStringEntityName;
private TokenCredential credentials;
private String fullyQualifiedNamespace;
private ProxyOptions proxyOptions;
private AmqpRetryOptions retryOptions;
private Scheduler scheduler;
private AmqpTransportType transport = AmqpTransportType.AMQP;
private SslDomain.VerifyMode verifyMode;
private boolean crossEntityTransactions;
private URL customEndpointAddress;
/**
 * Keeps track of the open clients that were created from this builder when there is a shared connection.
 */
private final AtomicInteger openClients = new AtomicInteger();
/**
 * Creates a new instance with the default transport {@link AmqpTransportType#AMQP}.
 */
public ServiceBusClientBuilder() {
}
/**
 * Sets the {@link ClientOptions} to be sent from the client built from this builder, enabling customization of
 * certain properties, as well as support the addition of custom header information. Refer to the {@link
 * ClientOptions} documentation for more information.
 *
 * @param clientOptions to be set on the client. May be {@code null}; no validation is performed here.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder clientOptions(ClientOptions clientOptions) {
    this.clientOptions = clientOptions;
    return this;
}
/**
 * Sets the fully-qualified namespace for the Service Bus.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    return this;
}
/**
 * Returns the configured fully-qualified namespace, failing when it was never set or is blank.
 *
 * @return The fully-qualified Service Bus namespace.
 * @throws IllegalArgumentException if the namespace is {@code null} or an empty string.
 */
private String getAndValidateFullyQualifiedNamespace() {
    if (!CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        return fullyQualifiedNamespace;
    }
    throw LOGGER.logExceptionAsError(
        new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
}
/**
 * Sets a custom endpoint address when connecting to the Service Bus service. This can be useful when your network
 * does not allow connecting to the standard Azure Service Bus endpoint address, but does allow connecting through
 * an intermediary. For example: {@literal https://my.custom.endpoint.com:55300}.
 * <p>
 * If no port is specified, the default port for the {@link #transportType(AmqpTransportType) transport type} is
 * used.
 *
 * @param customEndpointAddress The custom endpoint address.
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
 */
/**
 * Sets the connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @param connectionString Connection string for a Service Bus namespace or a specific Service Bus resource.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws AzureException if a shared-key credential cannot be created from the connection string.
 */
public ServiceBusClientBuilder connectionString(String connectionString) {
    final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
    final TokenCredential tokenCredential;
    try {
        tokenCredential = getTokenCredential(properties);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new AzureException("Could not create the ServiceBusSharedKeyCredential.", e));
    }
    this.fullyQualifiedNamespace = properties.getEndpoint().getHost();
    // An "EntityPath" in the connection string scopes this builder to a single queue or topic.
    String entityPath = properties.getEntityPath();
    if (!CoreUtils.isNullOrEmpty(entityPath)) {
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Setting entity from connection string.");
        this.connectionStringEntityName = entityPath;
    }
    return credential(properties.getEndpoint().getHost(), tokenCredential);
}
/**
 * Enable cross entity transaction on the connection to Service bus. Use this feature only when your transaction
 * scope spans across different Service Bus entities. This feature is achieved by routing all the messages through
 * one 'send-via' entity on server side as explained next.
 * Once clients are created for multiple entities, the first entity that an operation occurs on becomes the
 * entity through which all subsequent sends will be routed through ('send-via' entity). This enables the service to
 * perform a transaction that is meant to span multiple entities. This means that subsequent entities that perform
 * their first operation need to either be senders, or if they are receivers they need to be on the same entity as
 * the initial entity through which all sends are routed through (otherwise the service would not be able to ensure
 * that the transaction is committed because it cannot route a receive operation through a different entity). For
 * instance, if you have SenderA (For entity A) and ReceiverB (For entity B) that are created from a client with
 * cross-entity transactions enabled, you would need to receive first with ReceiverB to allow this to work. If you
 * first send to entity A, and then attempted to receive from entity B, an exception would be thrown.
 *
 * <p><strong>Avoid using non-transaction API on this client</strong></p>
 * Since this feature will set up connection to Service Bus optimised to enable this feature. Once all the clients
 * have been setup, the first receiver or sender used will initialize 'send-via' queue as a single message transfer
 * entity. All the messages will flow via this queue. Thus this client is not suitable for any non-transaction API.
 *
 * <p><strong>When not to enable this feature</strong></p>
 * If your transaction is involved in one Service bus entity only. For example you are receiving from one
 * queue/subscription and you want to settle your own messages which are part of one transaction.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-transactions#transfers-and-send-via">Service Bus transfers and send-via</a>
 */
public ServiceBusClientBuilder enableCrossEntityTransactions() {
    this.crossEntityTransactions = true;
    return this;
}
/**
 * Chooses the shared-key credential implied by the connection string: a pre-computed SAS when one is
 * present, otherwise a name/key pair that signs tokens on demand.
 */
private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
    if (properties.getSharedAccessSignature() == null) {
        return new ServiceBusSharedKeyCredential(properties.getSharedAccessKeyName(),
            properties.getSharedAccessKey(), ServiceBusConstants.TOKEN_VALIDITY);
    }
    return new ServiceBusSharedKeyCredential(properties.getSharedAccessSignature());
}
/**
 * Sets the configuration store that is used during construction of the service client.
 *
 * If not specified, the default configuration store is used to configure Service Bus clients. Use {@link
 * Configuration#NONE} to bypass using configuration settings during construction.
 *
 * @param configuration The configuration store used to configure Service Bus clients.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}
/**
 * Sets the credential by using a {@link TokenCredential} for the Service Bus resource.
 * <a href="https://mvnrepository.com/artifact/com.azure/azure-identity">
 * azure-identity</a> has multiple {@link TokenCredential} implementations that can be used to authenticate
 * the access to the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, TokenCredential credential) {
    Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Assign only after all validation passes so a failed call leaves the builder unchanged.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.credentials = credential;
    return this;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * <a href="https://aka.ms/azsdk/java/identity">identity and authentication</a>
 * documentation for more details on proper usage of the {@link TokenCredential} type.
 *
 * @param credential The token credential to use for authentication. Access controls may be specified by the
 * ServiceBus namespace or the requested Service Bus entity, depending on Azure configuration.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(TokenCredential credential) {
    this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link #connectionString(String)} take the same information in different forms. But it allows
 * you to update the name and key.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureNamedKeyCredential credential) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    // Wrap the named key in the internal shared-key credential used to generate SAS tokens.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with the shared access policies for the Service Bus resource.
 * You can find the shared access policies on the azure portal or Azure CLI.
 * For instance, on the portal, "Shared Access policies" has 'policy' and its 'Primary Key' and 'Secondary Key'.
 * The 'name' attribute of the {@link AzureNamedKeyCredential} is the 'policy' on portal and the 'key' attribute
 * can be either 'Primary Key' or 'Secondary Key'.
 * This method and {@link #connectionString(String)} take the same information in different forms. But it allows
 * you to update the name and key.
 *
 * @param credential {@link AzureNamedKeyCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Wrap the named key in the internal shared-key credential used to generate SAS tokens.
    this.credentials = new ServiceBusSharedKeyCredential(credential.getAzureNamedKey().getName(),
        credential.getAzureNamedKey().getKey(), ServiceBusConstants.TOKEN_VALIDITY);
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-sas">
 * Service Bus access control with Shared Access Signatures</a>.
 *
 * @param fullyQualifiedNamespace The fully-qualified namespace for the Service Bus.
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code fullyQualifiedNamespace} or {@code credential} is null.
 * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string.
 */
public ServiceBusClientBuilder credential(String fullyQualifiedNamespace, AzureSasCredential credential) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
    }
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the credential with Shared Access Signature for the Service Bus resource.
 * Refer to <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-sas">
 * Service Bus access control with Shared Access Signatures</a>.
 *
 * @param credential {@link AzureSasCredential} to be used for authentication.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 * @throws NullPointerException if {@code credential} is null.
 */
@Override
public ServiceBusClientBuilder credential(AzureSasCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.credentials = new ServiceBusSharedKeyCredential(credential.getSignature());
    return this;
}
/**
 * Sets the proxy configuration to use for {@link ServiceBusSenderAsyncClient}. When a proxy is configured, {@link
 * AmqpTransportType#AMQP_WEB_SOCKETS} must be used for the transport type.
 *
 * @param proxyOptions The proxy configuration to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder proxyOptions(ProxyOptions proxyOptions) {
    this.proxyOptions = proxyOptions;
    return this;
}
/**
 * Package-private method that sets the verify mode for this connection.
 * Used by tests and sibling builders; not part of the public API surface.
 *
 * @param verifyMode The verification mode.
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
    this.verifyMode = verifyMode;
    return this;
}
/**
 * Sets the retry options for Service Bus clients. If not specified, the default retry options are used.
 *
 * @param retryOptions The retry options to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}
/**
 * Sets the scheduler to use. Package-private; when unset, a default scheduler is chosen at
 * connection-creation time.
 *
 * @param scheduler Scheduler to be used.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
ServiceBusClientBuilder scheduler(Scheduler scheduler) {
    this.scheduler = scheduler;
    return this;
}
/**
 * Sets the transport type by which all the communication with Azure Service Bus occurs. Default value is {@link
 * AmqpTransportType#AMQP}.
 *
 * @param transportType The transport type to use.
 *
 * @return The updated {@link ServiceBusClientBuilder} object.
 */
@Override
public ServiceBusClientBuilder transportType(AmqpTransportType transportType) {
    this.transport = transportType;
    return this;
}
/**
 * A new instance of {@link ServiceBusSenderClientBuilder} used to configure Service Bus message senders.
 *
 * @return A new instance of {@link ServiceBusSenderClientBuilder}.
 */
public ServiceBusSenderClientBuilder sender() {
    // Each call returns a fresh sub-builder; all clients built from it share this builder's connection.
    return new ServiceBusSenderClientBuilder();
}
/**
 * A new instance of {@link ServiceBusReceiverClientBuilder} used to configure Service Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusReceiverClientBuilder}.
 */
public ServiceBusReceiverClientBuilder receiver() {
    // Each call returns a fresh sub-builder; all clients built from it share this builder's connection.
    return new ServiceBusReceiverClientBuilder();
}
/**
 * A new instance of {@link ServiceBusSessionReceiverClientBuilder} used to configure <b>session aware</b> Service
 * Bus message receivers.
 *
 * @return A new instance of {@link ServiceBusSessionReceiverClientBuilder}.
 */
public ServiceBusSessionReceiverClientBuilder sessionReceiver() {
    // Each call returns a fresh sub-builder; all clients built from it share this builder's connection.
    return new ServiceBusSessionReceiverClientBuilder();
}
/**
 * A new instance of {@link ServiceBusProcessorClientBuilder} used to configure {@link ServiceBusProcessorClient}
 * instance.
 *
 * @return A new instance of {@link ServiceBusProcessorClientBuilder}.
 */
public ServiceBusProcessorClientBuilder processor() {
    // Each call returns a fresh sub-builder; all clients built from it share this builder's connection.
    return new ServiceBusProcessorClientBuilder();
}
/**
 * A new instance of {@link ServiceBusSessionProcessorClientBuilder} used to configure a Service Bus processor
 * instance that processes sessions.
 *
 * @return A new instance of {@link ServiceBusSessionProcessorClientBuilder}.
 */
public ServiceBusSessionProcessorClientBuilder sessionProcessor() {
    // Each call returns a fresh sub-builder; all clients built from it share this builder's connection.
    return new ServiceBusSessionProcessorClientBuilder();
}
/**
 * Called when a child client is closed. Disposes of the shared connection if there are no more clients.
 */
void onClientClose() {
    synchronized (connectionLock) {
        final int numberOfOpenClients = openClients.decrementAndGet();
        LOGGER.atInfo()
            .addKeyValue("numberOfOpenClients", numberOfOpenClients)
            .log("Closing a dependent client.");
        if (numberOfOpenClients > 0) {
            // Other clients still share the connection; keep it alive.
            return;
        }
        if (numberOfOpenClients < 0) {
            // More closes than opens indicates a bookkeeping bug; log it but continue with disposal.
            LOGGER.atWarning()
                .addKeyValue("numberOfOpenClients", numberOfOpenClients)
                .log("There should not be less than 0 clients.");
        }
        LOGGER.info("No more open clients, closing shared connection.");
        if (sharedConnection != null) {
            sharedConnection.dispose();
            // Null out so a later client build creates a fresh connection.
            sharedConnection = null;
        } else {
            LOGGER.warning("Shared ServiceBusConnectionProcessor was already disposed.");
        }
    }
}
private ServiceBusConnectionProcessor getOrCreateConnectionProcessor(MessageSerializer serializer) {
if (retryOptions == null) {
retryOptions = DEFAULT_RETRY;
}
if (scheduler == null) {
scheduler = Schedulers.elastic();
}
synchronized (connectionLock) {
if (sharedConnection == null) {
final ConnectionOptions connectionOptions = getConnectionOptions();
final Flux<ServiceBusAmqpConnection> connectionFlux = Mono.fromCallable(() -> {
final String connectionId = StringUtil.getRandomString("MF");
final ReactorProvider provider = new ReactorProvider();
final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
connectionOptions.getAuthorizationScope());
return (ServiceBusAmqpConnection) new ServiceBusReactorAmqpConnection(connectionId,
connectionOptions, provider, handlerProvider, tokenManagerProvider, serializer,
crossEntityTransactions);
}).repeat();
sharedConnection = connectionFlux.subscribeWith(new ServiceBusConnectionProcessor(
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry()));
}
}
final int numberOfOpenClients = openClients.incrementAndGet();
LOGGER.info("
return sharedConnection;
}
/**
 * Assembles the {@link ConnectionOptions} used to open the shared AMQP connection, applying defaults and
 * validating the credential/transport combination.
 *
 * @throws IllegalArgumentException if no credential was configured, or a proxy address is combined with a
 *     non-web-socket transport.
 */
private ConnectionOptions getConnectionOptions() {
    configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration;
    if (credentials == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
            + "They can be set using: connectionString(String), connectionString(String, String), "
            + "or credentials(String, String, TokenCredential)"
        ));
    }
    // Plain AMQP over TCP cannot traverse an HTTP proxy; only web sockets can.
    if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
        && transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "Cannot use a proxy when TransportType is not AMQP."));
    }
    if (proxyOptions == null) {
        proxyOptions = getDefaultProxyConfiguration(configuration);
    }
    // Shared-key credentials authorize via SAS; everything else is treated as an AAD JWT.
    final CbsAuthorizationType authorizationType = credentials instanceof ServiceBusSharedKeyCredential
        ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
        : CbsAuthorizationType.JSON_WEB_TOKEN;
    final SslDomain.VerifyMode verificationMode = verifyMode != null
        ? verifyMode
        : SslDomain.VerifyMode.VERIFY_PEER_NAME;
    final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();
    // Library name/version are read from the packaged properties file for connection telemetry.
    final Map<String, String> properties = CoreUtils.getProperties(SERVICE_BUS_PROPERTIES_FILE);
    final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
    final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);
    if (customEndpointAddress == null) {
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion);
    } else {
        // A custom endpoint overrides only the host/port used for the socket; auth still targets the namespace.
        return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
            ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
            options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
            customEndpointAddress.getPort());
    }
}
/**
 * Derives proxy settings from the environment when none were configured explicitly.
 * Returns {@link ProxyOptions#SYSTEM_DEFAULTS} when no 'HTTP_PROXY' value is present.
 */
private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
    ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
    if (proxyOptions != null) {
        // Preserve any authentication type the user configured even when the address comes from env.
        authentication = proxyOptions.getAuthentication();
    }
    String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);
    if (CoreUtils.isNullOrEmpty(proxyAddress)) {
        return ProxyOptions.SYSTEM_DEFAULTS;
    }
    return getProxyOptions(authentication, proxyAddress, configuration,
        Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
}
/**
 * Builds {@link ProxyOptions} from a configured proxy address. Addresses in {@code host:port} form are used
 * directly; otherwise the JVM-wide proxy settings are consulted when 'java.net.useSystemProxies' is enabled.
 */
private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
    Configuration configuration, boolean useSystemProxies) {
    if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
        // Explicit "host:port" address: build the proxy directly from its pieces.
        final String[] hostPort = proxyAddress.split(":");
        final String host = hostPort[0];
        final int port = Integer.parseInt(hostPort[1]);
        final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
        final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
        return new ProxyOptions(authentication, proxy, username, password);
    }
    if (useSystemProxies) {
        // Fall back to the JVM-level proxy configuration resolved by azure-core.
        com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
            .fromConfiguration(configuration);
        return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(),
            coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword());
    }
    LOGGER.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
        + "set or was false.");
    return ProxyOptions.SYSTEM_DEFAULTS;
}
// True when the reference is null or the string contains no characters.
private static boolean isNullOrEmpty(String item) {
    if (item == null) {
        return true;
    }
    return item.isEmpty();
}
/**
 * Determines the messaging entity type from the configured names and verifies they are consistent:
 * at most one of queue/topic may be set, and it must agree with any entity embedded in the connection string.
 *
 * @return {@link MessagingEntityType#QUEUE}, {@link MessagingEntityType#SUBSCRIPTION}, or
 *     {@link MessagingEntityType#UNKNOWN} when only the connection string names the entity.
 * @throws IllegalStateException on missing or conflicting entity names.
 */
private static MessagingEntityType validateEntityPaths(String connectionStringEntityName,
    String topicName, String queueName) {
    final boolean hasTopicName = !isNullOrEmpty(topicName);
    final boolean hasQueueName = !isNullOrEmpty(queueName);
    final boolean hasConnectionStringEntity = !isNullOrEmpty(connectionStringEntityName);
    final MessagingEntityType entityType;
    if (!hasConnectionStringEntity && !hasQueueName && !hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(
            "Cannot build client without setting either a queueName or topicName."));
    } else if (hasQueueName && hasTopicName) {
        throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot build client with both queueName (%s) and topicName (%s) set.", queueName, topicName)));
    } else if (hasQueueName) {
        // Queue name must match the connection string's EntityPath if one was provided.
        if (hasConnectionStringEntity && !queueName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "queueName (%s) is different than the connectionString's EntityPath (%s).",
                queueName, connectionStringEntityName)));
        }
        entityType = MessagingEntityType.QUEUE;
    } else if (hasTopicName) {
        // Topic name must match the connection string's EntityPath if one was provided.
        if (hasConnectionStringEntity && !topicName.equals(connectionStringEntityName)) {
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                "topicName (%s) is different than the connectionString's EntityPath (%s).",
                topicName, connectionStringEntityName)));
        }
        entityType = MessagingEntityType.SUBSCRIPTION;
    } else {
        // Only the connection string names the entity; the concrete type cannot be determined here.
        entityType = MessagingEntityType.UNKNOWN;
    }
    return entityType;
}
/**
 * Computes the AMQP entity path for the target entity, appending the dead-letter sub-queue suffix when one is
 * requested.
 *
 * @throws IllegalStateException if a topic is used without a subscription name.
 * @throws IllegalArgumentException on an unsupported entity type or sub-queue value.
 */
private static String getEntityPath(MessagingEntityType entityType, String queueName,
    String topicName, String subscriptionName, SubQueue subQueue) {
    String entityPath;
    switch (entityType) {
        case QUEUE:
            entityPath = queueName;
            break;
        case SUBSCRIPTION:
            if (isNullOrEmpty(subscriptionName)) {
                throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalStateException(String.format(
                    "topicName (%s) must have a subscriptionName associated with it.", topicName)));
            }
            // Path format: "<topic>/subscriptions/<subscription>".
            entityPath = String.format(Locale.ROOT, SUBSCRIPTION_ENTITY_PATH_FORMAT, topicName,
                subscriptionName);
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(
                new IllegalArgumentException("Unknown entity type: " + entityType));
    }
    if (subQueue == null) {
        return entityPath;
    }
    // Dead-letter sub-queues are addressed by appending a well-known suffix to the entity path.
    switch (subQueue) {
        case NONE:
            break;
        case TRANSFER_DEAD_LETTER_QUEUE:
            entityPath += TRANSFER_DEAD_LETTER_QUEUE_NAME_SUFFIX;
            break;
        case DEAD_LETTER_QUEUE:
            entityPath += DEAD_LETTER_QUEUE_NAME_SUFFIX;
            break;
        default:
            throw ServiceBusClientBuilder.LOGGER.logExceptionAsError(new IllegalArgumentException("Unsupported value of subqueue type: "
                + subQueue));
    }
    return entityPath;
}
/**
 * Builder for creating {@link ServiceBusSenderClient} and {@link ServiceBusSenderAsyncClient} to publish messages
 * to Service Bus.
 *
 * @see ServiceBusSenderAsyncClient
 * @see ServiceBusSenderClient
 */
@ServiceClientBuilder(serviceClients = {ServiceBusSenderClient.class, ServiceBusSenderAsyncClient.class})
public final class ServiceBusSenderClientBuilder {
    // At most one of these may be set; validateEntityPaths enforces it at build time.
    private String queueName;
    private String topicName;

    private ServiceBusSenderClientBuilder() {
    }

    /**
     * Sets the name of the Service Bus queue to publish messages to.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the name of the Service Bus topic to publish messages to.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSenderClientBuilder} object.
     */
    public ServiceBusSenderClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link
     * ServiceBusMessage} to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
     *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
     *     {@link ServiceBusClientBuilder#connectionString(String) connectionString} contains an
     *     {@code EntityPath} that does not match one set in {@link #queueName(String) queueName} or
     *     {@link #topicName(String) topicName}.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderAsyncClient buildAsyncClient() {
        final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
        final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
            queueName);
        // Pick the name matching the resolved entity type; UNKNOWN means only the connection string
        // named the entity.
        final String entityName;
        switch (entityType) {
            case QUEUE:
                entityName = queueName;
                break;
            case SUBSCRIPTION:
                entityName = topicName;
                break;
            case UNKNOWN:
                entityName = connectionStringEntityName;
                break;
            default:
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Unknown entity type: " + entityType));
        }
        return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions,
            tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, null);
    }

    /**
     * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage}
     * to a Service Bus queue or topic.
     *
     * @return A new {@link ServiceBusSenderClient} for transmitting to a Service queue or topic.
     * @throws IllegalStateException if {@link #queueName(String) queueName} or {@link #topicName(String)
     *     topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus
     *     {@link ServiceBusClientBuilder#connectionString(String) connectionString} contains an
     *     {@code EntityPath} that does not match one set in {@link #queueName(String) queueName} or
     *     {@link #topicName(String) topicName}.
     * @throws IllegalArgumentException if the entity type is not a queue or a topic.
     */
    public ServiceBusSenderClient buildClient() {
        return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions));
    }
}
/**
 * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a session-based Service Bus
 * entity. {@link ServiceBusProcessorClient} processes messages and errors via {@link #processMessage(Consumer)}
 * and {@link #processError(Consumer)}. When the processor finishes processing a session, it tries to fetch the
 * next session to process.
 *
 * <p>
 * By default, the processor:
 * <ul>
 * <li>Automatically settles messages. Disabled via {@link #disableAutoComplete()}</li>
 * <li>Processes 1 session concurrently. Configured via {@link #maxConcurrentSessions(int)}</li>
 * <li>Invokes 1 instance of {@link #processMessage(Consumer) processMessage consumer}. Configured via
 * {@link #maxConcurrentCalls(int)}</li>
 * </ul>
 *
 * <p><strong>Instantiate a session-enabled processor client</strong></p>
 * <pre>{@code
 * Consumer<ServiceBusReceivedMessageContext> onMessage = context -> {
 *     ServiceBusReceivedMessage message = context.getMessage();
 *     System.out.printf("Processing message. Session: %s, Sequence #: %s. Contents: %s%n",
 *         message.getSessionId(), message.getSequenceNumber(), message.getBody());
 * };
 *
 * Consumer<ServiceBusErrorContext> onError = context -> {
 *     System.out.printf("Error when receiving messages from namespace: '%s'. Entity: '%s'%n",
 *         context.getFullyQualifiedNamespace(), context.getEntityPath());
 *
 *     if (context.getException() instanceof ServiceBusException) {
 *         ServiceBusException exception = (ServiceBusException) context.getException();
 *         System.out.printf("Error source: %s, reason: %s%n", context.getErrorSource(),
 *             exception.getReason());
 *     } else {
 *         System.out.printf("Error occurred: %s%n", context.getException());
 *     }
 * };
 *
 * // Retrieve 'connectionString' and the queue name from your configuration.
 *
 * ServiceBusProcessorClient sessionProcessor = new ServiceBusClientBuilder()
 *     .connectionString(connectionString)
 *     .sessionProcessor()
 *     .queueName(sessionEnabledQueueName)
 *     .maxConcurrentSessions(2)
 *     .processMessage(onMessage)
 *     .processError(onError)
 *     .buildProcessorClient();
 *
 * // Start the processor in the background.
 * sessionProcessor.start();
 * }</pre>
 *
 * @see ServiceBusProcessorClient
 */
public final class ServiceBusSessionProcessorClientBuilder {
// Options (concurrency, tracing) handed to the processor client when it is built.
private final ServiceBusProcessorClientOptions processorClientOptions;
// Underlying session receiver builder that this processor builder configures.
private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder;
// User callbacks for message handling and error reporting.
private Consumer<ServiceBusReceivedMessageContext> processMessage;
private Consumer<ServiceBusErrorContext> processError;
// Defaults: one concurrent callback invocation and one concurrent session.
private ServiceBusSessionProcessorClientBuilder() {
    sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder();
    processorClientOptions = new ServiceBusProcessorClientOptions()
        .setMaxConcurrentCalls(1)
        .setTracerProvider(tracerProvider);
    sessionReceiverClientBuilder.maxConcurrentSessions(1);
}
/**
 * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or {@code null}
 * disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode,
 * auto-renewal is disabled.
 *
 * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock. {@link Duration#ZERO}
 * or {@code null} indicates that auto-renewal is disabled.
 *
 * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
 * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
 */
public ServiceBusSessionProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
    validateAndThrow(maxAutoLockRenewDuration);
    sessionReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
    return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1"));
}
sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions);
return this;
}
/**
* Sets the prefetch count of the processor. For both {@link ServiceBusReceiveMode
* {@link ServiceBusReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application starts the processor.
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
* Using a non-zero prefetch risks of losing messages even though it has better performance.
* @see <a href="https:
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) {
sessionReceiverClientBuilder.prefetchCount(prefetchCount);
return this;
}
/**
* Sets the name of the queue to create a processor for.
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder queueName(String queueName) {
sessionReceiverClientBuilder.queueName(queueName);
return this;
}
/**
* Sets the receive mode for the processor.
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
sessionReceiverClientBuilder.receiveMode(receiveMode);
return this;
}
/**
* Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
* secondary sub-queue, called a dead-letter queue (DLQ).
*
* @param subQueue The type of the sub queue.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
* @see SubQueue
*/
public ServiceBusSessionProcessorClientBuilder subQueue(SubQueue subQueue) {
this.sessionReceiverClientBuilder.subQueue(subQueue);
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) {
sessionReceiverClientBuilder.subscriptionName(subscriptionName);
return this;
}
/**
* Sets the name of the topic. <b>{@link
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
* @see
*/
public ServiceBusSessionProcessorClientBuilder topicName(String topicName) {
sessionReceiverClientBuilder.topicName(topicName);
return this;
}
/**
* The message processing callback for the processor that will be executed when a message is received.
* @param processMessage The message processing consumer that will be executed when a message is received.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder processMessage(
Consumer<ServiceBusReceivedMessageContext> processMessage) {
this.processMessage = processMessage;
return this;
}
/**
* The error handler for the processor which will be invoked in the event of an error while receiving messages.
* @param processError The error handler which will be executed when an error occurs.
*
* @return The updated {@link ServiceBusProcessorClientBuilder} object
*/
public ServiceBusSessionProcessorClientBuilder processError(
Consumer<ServiceBusErrorContext> processError) {
this.processError = processError;
return this;
}
/**
* Max concurrent messages that this processor should process.
*
* @param maxConcurrentCalls max concurrent messages that this processor should process.
*
* @return The updated {@link ServiceBusSessionProcessorClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentCalls} is less than 1.
*/
public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
if (maxConcurrentCalls < 1) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
}
processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
return this;
}
/**
* Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is
* {@link ServiceBusReceivedMessageContext
* the message is processed, it is {@link ServiceBusReceivedMessageContext
* abandoned}.
*
* @return The modified {@link ServiceBusSessionProcessorClientBuilder} object.
*/
public ServiceBusSessionProcessorClientBuilder disableAutoComplete() {
sessionReceiverClientBuilder.disableAutoComplete();
processorClientOptions.setDisableAutoComplete(true);
return this;
}
/**
* Creates a <b>session-aware</b> Service Bus processor responsible for reading
* {@link ServiceBusReceivedMessage messages} from a specific queue or subscription.
*
* @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or subscription.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
* @throws NullPointerException if the {@link
* callbacks are not set.
*/
public ServiceBusProcessorClient buildProcessorClient() {
return new ServiceBusProcessorClient(sessionReceiverClientBuilder,
sessionReceiverClientBuilder.queueName, sessionReceiverClientBuilder.topicName,
sessionReceiverClientBuilder.subscriptionName,
Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
}
}
    /**
     * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
     * messages from a <b>session aware</b> Service Bus entity.
     *
     * @see ServiceBusReceiverAsyncClient
     * @see ServiceBusReceiverClient
     */
    @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
    public final class ServiceBusSessionReceiverClientBuilder {
        // Auto-complete is on by default; disabled by disableAutoComplete() or forced off in certain modes below.
        private boolean enableAutoComplete = true;
        // null means "no roll-over"; set via the package-private maxConcurrentSessions(int).
        private Integer maxConcurrentSessions = null;
        private int prefetchCount = DEFAULT_PREFETCH_COUNT;
        private String queueName;
        private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
        private String subscriptionName;
        private String topicName;
        private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
        private SubQueue subQueue = SubQueue.NONE;

        private ServiceBusSessionReceiverClientBuilder() {
        }

        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message
         * is completed; if an error occurs while the message is processed, it is abandoned.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
            this.enableAutoComplete = false;
            return this;
        }

        /**
         * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration#ZERO} or
         * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }

        /**
         * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
         * Package-private: only exposed through the session processor builder.
         *
         * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
         */
        ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
            if (maxConcurrentSessions < 1) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "maxConcurrentSessions cannot be less than 1."));
            }
            this.maxConcurrentSessions = maxConcurrentSessions;
            return this;
        }

        /**
         * Sets the prefetch count of the receiver, used in both
         * {@link ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} and
         * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} modes.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
         * and before the application asks for one. Setting a non-zero value will prefetch that number of messages.
         * Setting the value to zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code prefetchCount} is negative.
         */
        public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
            validateAndThrow(prefetchCount);
            this.prefetchCount = prefetchCount;
            return this;
        }

        /**
         * Sets the name of the queue to create a receiver for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }

        /**
         * Sets the receive mode for the receiver.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         */
        public ServiceBusSessionReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            this.receiveMode = receiveMode;
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusSessionReceiverClientBuilder subQueue(SubQueue subQueue) {
            this.subQueue = subQueue;
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to.
         * <b>{@link #topicName(String)} must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
            this.subscriptionName = subscriptionName;
            return this;
        }

        /**
         * Sets the name of the topic. <b>{@link #subscriptionName(String)} must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }

        /**
         * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
         * ServiceBusMessage messages} from a specific queue or subscription, for use by the processor client.
         *
         * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if neither the queue name nor the topic name is set, or both are set.
         * @throws IllegalArgumentException if the queue or topic name is not set via {@code queueName()} or
         * {@code topicName()}.
         */
        ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                subQueue);
            if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                // NOTE(review): runtime log string has a typo ("in for"); left unchanged here.
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                // No lock renewal in RECEIVE_AND_DELETE mode.
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            // The null argument presumably is a per-receiver session id (none here) — TODO confirm against
            // the ReceiverOptions constructor.
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete, null,
                maxConcurrentSessions);
            final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
                connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
            return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
                entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
        }

        /**
         * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
         * ServiceBusMessage messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or
         * subscription.
         * @throws IllegalStateException if neither the queue name nor the topic name is set, or both are set.
         * @throws IllegalArgumentException if the queue or topic name is not set via {@code queueName()} or
         * {@code topicName()}.
         */
        public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
            return buildAsyncClient(true);
        }

        /**
         * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link
         * ServiceBusMessage messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if neither the queue name nor the topic name is set, or both are set.
         * @throws IllegalArgumentException if the queue or topic name is not set via {@code queueName()} or
         * {@code topicName()}.
         */
        public ServiceBusSessionReceiverClient buildClient() {
            // Prefetch of zero means the sync client must request messages on demand.
            final boolean isPrefetchDisabled = prefetchCount == 0;
            return new ServiceBusSessionReceiverClient(buildAsyncClient(false),
                isPrefetchDisabled,
                MessageUtils.getTotalTimeout(retryOptions));
        }

        // Shared construction path for the sync and async session receiver clients.
        // isAutoCompleteAllowed is false for the synchronous client, where auto-complete is unsupported.
        private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            // NOTE(review): unlike buildAsyncClientForProcessor, this always passes SubQueue.NONE and ignores
            // any subQueue(...) setting — confirm this is intended.
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                SubQueue.NONE);
            if (!isAutoCompleteAllowed && enableAutoComplete) {
                LOGGER.warning(
                    "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
                enableAutoComplete = false;
            } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                // No lock renewal in RECEIVE_AND_DELETE mode.
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
            return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
                entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer,
                ServiceBusClientBuilder.this::onClientClose);
        }
    }
    /**
     * Builder for creating {@link ServiceBusProcessorClient} to consume messages from a Service Bus entity.
     * {@link ServiceBusProcessorClient ServiceBusProcessorClients} provides a push-based mechanism that notifies
     * the message processing callback when a message is received or the error handler when an error is observed.
     * To create an instance, configure the two callbacks — {@link #processMessage(Consumer) processMessage} and
     * {@link #processError(Consumer) processError} — along with the entity to receive from. The processor supports
     * auto-completion and auto-lock renewal.
     *
     * @see ServiceBusProcessorClient
     */
    public final class ServiceBusProcessorClientBuilder {
        // Underlying receiver builder; entity and receive settings are delegated to it.
        private final ServiceBusReceiverClientBuilder serviceBusReceiverClientBuilder;
        // Processor-level options (max concurrent calls, auto-complete, tracing) handed to the built client.
        private final ServiceBusProcessorClientOptions processorClientOptions;
        // Callback invoked for each received message. Required before buildProcessorClient().
        private Consumer<ServiceBusReceivedMessageContext> processMessage;
        // Callback invoked when an error occurs while receiving. Required before buildProcessorClient().
        private Consumer<ServiceBusErrorContext> processError;

        // Defaults to one concurrent call unless overridden via maxConcurrentCalls(int).
        private ServiceBusProcessorClientBuilder() {
            serviceBusReceiverClientBuilder = new ServiceBusReceiverClientBuilder();
            processorClientOptions = new ServiceBusProcessorClientOptions()
                .setMaxConcurrentCalls(1)
                .setTracerProvider(tracerProvider);
        }

        /**
         * Sets the prefetch count of the processor, used in both
         * {@link ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} and
         * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} modes.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
         * and before the application starts the processor.
         * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder prefetchCount(int prefetchCount) {
            serviceBusReceiverClientBuilder.prefetchCount(prefetchCount);
            return this;
        }

        /**
         * Sets the name of the queue to create a processor for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder queueName(String queueName) {
            serviceBusReceiverClientBuilder.queueName(queueName);
            return this;
        }

        /**
         * Sets the receive mode for the processor.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            serviceBusReceiverClientBuilder.receiveMode(receiveMode);
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to. Azure Service Bus queues and subscriptions provide a
         * secondary sub-queue, called a dead-letter queue (DLQ).
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusProcessorClientBuilder subQueue(SubQueue subQueue) {
            serviceBusReceiverClientBuilder.subQueue(subQueue);
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to.
         * <b>{@link #topicName(String)} must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusProcessorClientBuilder subscriptionName(String subscriptionName) {
            serviceBusReceiverClientBuilder.subscriptionName(subscriptionName);
            return this;
        }

        /**
         * Sets the name of the topic. <b>{@link #subscriptionName(String)} must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusProcessorClientBuilder topicName(String topicName) {
            serviceBusReceiverClientBuilder.topicName(topicName);
            return this;
        }

        /**
         * The message processing callback for the processor which will be executed when a message is received.
         *
         * @param processMessage The message processing consumer that will be executed when a message is received.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder processMessage(
            Consumer<ServiceBusReceivedMessageContext> processMessage) {
            this.processMessage = processMessage;
            return this;
        }

        /**
         * The error handler for the processor which will be invoked in the event of an error while receiving messages.
         *
         * @param processError The error handler which will be executed when an error occurs.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object
         */
        public ServiceBusProcessorClientBuilder processError(Consumer<ServiceBusErrorContext> processError) {
            this.processError = processError;
            return this;
        }

        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
         * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusProcessorClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            serviceBusReceiverClientBuilder.maxAutoLockRenewDuration(maxAutoLockRenewDuration);
            return this;
        }

        /**
         * Max concurrent messages that this processor should process. By default, this is set to 1.
         *
         * @param maxConcurrentCalls max concurrent messages that this processor should process.
         * @return The updated {@link ServiceBusProcessorClientBuilder} object.
         * @throws IllegalArgumentException if the {@code maxConcurrentCalls} is set to a value less than 1.
         */
        public ServiceBusProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) {
            if (maxConcurrentCalls < 1) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1"));
            }
            processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls);
            return this;
        }

        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message
         * is completed; if an error occurs while the message is processed, it is abandoned.
         *
         * @return The modified {@link ServiceBusProcessorClientBuilder} object.
         */
        public ServiceBusProcessorClientBuilder disableAutoComplete() {
            serviceBusReceiverClientBuilder.disableAutoComplete();
            processorClientOptions.setDisableAutoComplete(true);
            return this;
        }

        /**
         * Creates a Service Bus message processor responsible for reading {@link ServiceBusReceivedMessage
         * messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusProcessorClient} that processes messages from a queue or subscription.
         * @throws IllegalStateException if neither the queue name nor the topic name is set, or both are set.
         * @throws IllegalArgumentException if the queue or topic name is not set via {@code queueName()} or
         * {@code topicName()}.
         * @throws NullPointerException if the {@code processMessage} or {@code processError} callbacks are not set.
         */
        public ServiceBusProcessorClient buildProcessorClient() {
            return new ServiceBusProcessorClient(serviceBusReceiverClientBuilder,
                serviceBusReceiverClientBuilder.queueName, serviceBusReceiverClientBuilder.topicName,
                serviceBusReceiverClientBuilder.subscriptionName,
                Objects.requireNonNull(processMessage, "'processMessage' cannot be null"),
                Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions);
        }
    }
    /**
     * Builder for creating {@link ServiceBusReceiverClient} and {@link ServiceBusReceiverAsyncClient} to consume
     * messages from Service Bus.
     *
     * @see ServiceBusReceiverAsyncClient
     * @see ServiceBusReceiverClient
     */
    @ServiceClientBuilder(serviceClients = {ServiceBusReceiverClient.class, ServiceBusReceiverAsyncClient.class})
    public final class ServiceBusReceiverClientBuilder {
        // Auto-complete is on by default; disabled by disableAutoComplete() or forced off in certain modes below.
        private boolean enableAutoComplete = true;
        private int prefetchCount = DEFAULT_PREFETCH_COUNT;
        private String queueName;
        // NOTE(review): unlike the session receiver builder, this field has no SubQueue.NONE default —
        // confirm downstream getEntityPath handles a null subQueue.
        private SubQueue subQueue;
        private ServiceBusReceiveMode receiveMode = ServiceBusReceiveMode.PEEK_LOCK;
        private String subscriptionName;
        private String topicName;
        private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

        private ServiceBusReceiverClientBuilder() {
        }

        /**
         * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message
         * is completed; if an error occurs while the message is processed, it is abandoned.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder disableAutoComplete() {
            this.enableAutoComplete = false;
            return this;
        }

        /**
         * Sets the amount of time to continue auto-renewing the lock. Setting {@link Duration#ZERO} or
         * {@code null} disables auto-renewal. For {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode,
         * auto-renewal is disabled.
         *
         * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the lock.
         * {@link Duration#ZERO} or {@code null} indicates that auto-renewal is disabled.
         *
         * @return The updated {@link ServiceBusReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is negative.
         */
        public ServiceBusReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            validateAndThrow(maxAutoLockRenewDuration);
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }

        /**
         * Sets the prefetch count of the receiver, used in both
         * {@link ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} and
         * {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} modes.
         *
         * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
         * and before the application asks for one. Setting a non-zero value will prefetch that number of messages.
         * Setting the value to zero turns prefetch off.
         *
         * @param prefetchCount The prefetch count.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @throws IllegalArgumentException If {@code prefetchCount} is negative.
         */
        public ServiceBusReceiverClientBuilder prefetchCount(int prefetchCount) {
            validateAndThrow(prefetchCount);
            this.prefetchCount = prefetchCount;
            return this;
        }

        /**
         * Sets the name of the queue to create a receiver for.
         *
         * @param queueName Name of the queue.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder queueName(String queueName) {
            this.queueName = queueName;
            return this;
        }

        /**
         * Sets the receive mode for the receiver.
         *
         * @param receiveMode Mode for receiving messages.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         */
        public ServiceBusReceiverClientBuilder receiveMode(ServiceBusReceiveMode receiveMode) {
            this.receiveMode = receiveMode;
            return this;
        }

        /**
         * Sets the type of the {@link SubQueue} to connect to.
         *
         * @param subQueue The type of the sub queue.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see SubQueue
         */
        public ServiceBusReceiverClientBuilder subQueue(SubQueue subQueue) {
            this.subQueue = subQueue;
            return this;
        }

        /**
         * Sets the name of the subscription in the topic to listen to.
         * <b>{@link #topicName(String)} must also be set.</b>
         *
         * @param subscriptionName Name of the subscription.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see #topicName(String)
         */
        public ServiceBusReceiverClientBuilder subscriptionName(String subscriptionName) {
            this.subscriptionName = subscriptionName;
            return this;
        }

        /**
         * Sets the name of the topic. <b>{@link #subscriptionName(String)} must also be set.</b>
         *
         * @param topicName Name of the topic.
         *
         * @return The modified {@link ServiceBusReceiverClientBuilder} object.
         * @see #subscriptionName(String)
         */
        public ServiceBusReceiverClientBuilder topicName(String topicName) {
            this.topicName = topicName;
            return this;
        }

        /**
         * Creates an <b>asynchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage
         * messages} from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if neither the queue name nor the topic name is set, or both are set.
         * @throws IllegalArgumentException if the queue or topic name is not set via {@code queueName()} or
         * {@code topicName()}.
         */
        public ServiceBusReceiverAsyncClient buildAsyncClient() {
            return buildAsyncClient(true);
        }

        /**
         * Creates a <b>synchronous</b> Service Bus receiver responsible for reading {@link ServiceBusMessage messages}
         * from a specific queue or subscription.
         *
         * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or subscription.
         * @throws IllegalStateException if neither the queue name nor the topic name is set, or both are set.
         * @throws IllegalArgumentException if the queue or topic name is not set via {@code queueName()} or
         * {@code topicName()}.
         */
        public ServiceBusReceiverClient buildClient() {
            // Prefetch of zero means the sync client must request messages on demand.
            final boolean isPrefetchDisabled = prefetchCount == 0;
            return new ServiceBusReceiverClient(buildAsyncClient(false),
                isPrefetchDisabled,
                MessageUtils.getTotalTimeout(retryOptions));
        }

        // Shared construction path for the sync and async receiver clients.
        // isAutoCompleteAllowed is false for the synchronous client, where auto-complete is unsupported.
        ServiceBusReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
            final MessagingEntityType entityType = validateEntityPaths(connectionStringEntityName, topicName,
                queueName);
            final String entityPath = getEntityPath(entityType, queueName, topicName, subscriptionName,
                subQueue);
            if (!isAutoCompleteAllowed && enableAutoComplete) {
                LOGGER.warning(
                    "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
                enableAutoComplete = false;
            } else if (enableAutoComplete && receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                // NOTE(review): runtime log string has a typo ("in for"); left unchanged here.
                LOGGER.warning("'enableAutoComplete' is not needed in for RECEIVE_AND_DELETE mode.");
                enableAutoComplete = false;
            }
            if (receiveMode == ServiceBusReceiveMode.RECEIVE_AND_DELETE) {
                // No lock renewal in RECEIVE_AND_DELETE mode.
                maxAutoLockRenewDuration = Duration.ZERO;
            }
            final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
            final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
                maxAutoLockRenewDuration, enableAutoComplete);
            return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
                entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose);
        }
    }
private void validateAndThrow(int prefetchCount) {
if (prefetchCount < 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"prefetchCount (%s) cannot be less than 0.", prefetchCount)));
}
}
private void validateAndThrow(Duration maxLockRenewalDuration) {
if (maxLockRenewalDuration != null && maxLockRenewalDuration.isNegative()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"'maxLockRenewalDuration' cannot be negative."));
}
}
} |
please run the playback test (without "RECORD") locally. this line and some more is wrong. In test env on CI, there is no such environment variable defined. You had to give them a default value. | protected void beforeTest() {
try {
ConfidentialLedgerIdentityClientBuilder confidentialLedgerIdentityClientbuilder = new ConfidentialLedgerIdentityClientBuilder()
.identityServiceUri(
Configuration.getGlobalConfiguration().get("IDENTITYSERVICEURI", "identityserviceuri"))
.httpClient(HttpClient.createDefault())
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
confidentialLedgerIdentityClientbuilder
.httpClient(interceptorManager.getPlaybackClient())
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
confidentialLedgerIdentityClientbuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new DefaultAzureCredentialBuilder().build());
} else if (getTestMode() == TestMode.LIVE) {
confidentialLedgerIdentityClientbuilder.credential(new DefaultAzureCredentialBuilder().build());
}
ConfidentialLedgerIdentityClient confidentialLedgerIdentityClient = confidentialLedgerIdentityClientbuilder
.buildClient();
String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERURI")
.replaceAll("\\w+:
.replaceAll("\\..*", "");
Response<BinaryData> ledgerIdentityWithResponse = confidentialLedgerIdentityClient
.getLedgerIdentityWithResponse(ledgerId, null);
BinaryData identityResponse = ledgerIdentityWithResponse.getValue();
ObjectMapper mapper = new ObjectMapper();
JsonNode jsonNode = mapper.readTree(identityResponse.toBytes());
String ledgerTslCertificate = jsonNode.get("ledgerTlsCertificate").asText();
SslContext sslContext = SslContextBuilder.forClient()
.trustManager(new ByteArrayInputStream(ledgerTslCertificate.getBytes(StandardCharsets.UTF_8))).build();
reactor.netty.http.client.HttpClient reactorClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
HttpClient httpClient = new NettyAsyncHttpClientBuilder(reactorClient).wiretap(true).build();
System.out.println("Creating Confidential Ledger client with the certificate...");
ConfidentialLedgerClientBuilder confidentialLedgerClientbuilder = new ConfidentialLedgerClientBuilder()
.ledgerUri(Configuration.getGlobalConfiguration().get("LEDGERURI", "ledgeruri"))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
confidentialLedgerClientbuilder
.httpClient(interceptorManager.getPlaybackClient())
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
confidentialLedgerClientbuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new DefaultAzureCredentialBuilder().build());
} else if (getTestMode() == TestMode.LIVE) {
confidentialLedgerClientbuilder.credential(new AzureCliCredentialBuilder().build());
}
confidentialLedgerClient = confidentialLedgerClientbuilder.buildClient();
} catch (Exception ex) {
System.out.println("Error thrown from ConfidentialLedgerClientTestBase:" + ex);
}
} | .replaceAll("\\..*", ""); | protected void beforeTest() {
try {
ConfidentialLedgerIdentityClientBuilder confidentialLedgerIdentityClientbuilder = new ConfidentialLedgerIdentityClientBuilder()
.identityServiceUri(
Configuration.getGlobalConfiguration().get("IDENTITYSERVICEURI", "https:
.httpClient(HttpClient.createDefault())
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
confidentialLedgerIdentityClientbuilder
.httpClient(interceptorManager.getPlaybackClient())
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
confidentialLedgerIdentityClientbuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new DefaultAzureCredentialBuilder().build());
} else if (getTestMode() == TestMode.LIVE) {
confidentialLedgerIdentityClientbuilder.credential(new DefaultAzureCredentialBuilder().build());
}
confidentialLedgerIdentityClient = confidentialLedgerIdentityClientbuilder
.buildClient();
String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERID", "emily-java-sdk-tests");
confidentialLedgerClientBuilder = new ConfidentialLedgerClientBuilder()
.ledgerUri(Configuration.getGlobalConfiguration().get("LEDGERURI", "https:
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
confidentialLedgerClientBuilder
.httpClient(interceptorManager.getPlaybackClient())
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
confidentialLedgerClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(new DefaultAzureCredentialBuilder().build());
} else if (getTestMode() == TestMode.LIVE) {
confidentialLedgerClientBuilder.credential(new AzureCliCredentialBuilder().build());
}
} catch (Exception ex) {
System.out.println("Error thrown from ConfidentialLedgerClientTestBase:" + ex);
}
} | class ConfidentialLedgerClientTestBase extends TestBase {
protected ConfidentialLedgerClient confidentialLedgerClient;
@Override
} | class ConfidentialLedgerClientTestBase extends TestBase {
protected ConfidentialLedgerClient confidentialLedgerClient;
protected ConfidentialLedgerClientBuilder confidentialLedgerClientBuilder;
protected ConfidentialLedgerIdentityClient confidentialLedgerIdentityClient;
@Override
} |
I've added this as a follow-up task for GA | public static String bufferJsonObject(JsonReader jsonReader) {
if (jsonReader.currentToken() == JsonToken.NULL) {
return null;
} else if (jsonReader.isStartArrayOrObject()) {
return jsonReader.readChildren();
} else if (jsonReader.currentToken() == JsonToken.FIELD_NAME) {
StringBuilder json = new StringBuilder("{");
JsonToken token = jsonReader.currentToken();
boolean needsComa = false;
while (token != JsonToken.END_OBJECT) {
if (needsComa) {
json.append(",");
}
if (token == JsonToken.FIELD_NAME) {
json.append("\"").append(jsonReader.getFieldName()).append("\":");
needsComa = false;
} else {
if (token == JsonToken.STRING) {
json.append("\"").append(jsonReader.getStringValue()).append("\"");
} else if (jsonReader.isStartArrayOrObject()) {
jsonReader.readChildren(json);
} else {
json.append(jsonReader.getTextValue());
}
needsComa = true;
}
token = jsonReader.nextToken();
}
return json.toString();
} else {
throw new IllegalStateException("Cannot buffer a JSON object from a non-array, non-object, non-field name "
+ "starting location. Starting location: " + jsonReader.currentToken());
}
}
private JsonUtils() {
}
} | + "starting location. Starting location: " + jsonReader.currentToken()); | public static String bufferJsonObject(JsonReader jsonReader) {
if (jsonReader.currentToken() == JsonToken.NULL) {
return null;
} else if (jsonReader.isStartArrayOrObject()) {
return jsonReader.readChildren();
} else if (jsonReader.currentToken() == JsonToken.FIELD_NAME) {
StringBuilder json = new StringBuilder("{");
JsonToken token = jsonReader.currentToken();
boolean needsComa = false;
while (token != JsonToken.END_OBJECT) {
if (needsComa) {
json.append(",");
}
if (token == JsonToken.FIELD_NAME) {
json.append("\"").append(jsonReader.getFieldName()).append("\":");
needsComa = false;
} else {
if (token == JsonToken.STRING) {
json.append("\"").append(jsonReader.getStringValue()).append("\"");
} else if (jsonReader.isStartArrayOrObject()) {
jsonReader.readChildren(json);
} else {
json.append(jsonReader.getTextValue());
}
needsComa = true;
}
token = jsonReader.nextToken();
}
return json.toString();
} else {
throw new IllegalStateException("Cannot buffer a JSON object from a non-array, non-object, non-field name "
+ "starting location. Starting location: " + jsonReader.currentToken());
}
}
private JsonUtils() {
}
} | class JsonUtils {
/**
* Serializes an array.
* <p>
* Handles three scenarios for the array:
*
* <ul>
* <li>null {@code array} writes JSON null</li>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object.
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, T[] array,
BiConsumer<JsonWriter, T> elementWriterFunc) {
if (array == null) {
return jsonWriter.writeNullField(fieldName).flush();
}
jsonWriter.writeStartArray(fieldName);
for (T element : array) {
elementWriterFunc.accept(jsonWriter, element);
}
return jsonWriter.writeEndArray().flush();
}
/**
* Serializes an array.
* <p>
* Handles three scenarios for the array:
*
* <ul>
* <li>null {@code array} writes JSON null</li>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object.
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, Iterable<T> array,
BiConsumer<JsonWriter, T> elementWriterFunc) {
if (array == null) {
return jsonWriter.writeNullField(fieldName).flush();
}
jsonWriter.writeStartArray(fieldName);
for (T element : array) {
elementWriterFunc.accept(jsonWriter, element);
}
return jsonWriter.writeEndArray().flush();
}
/**
* Serializes a map.
*
* @param jsonWriter The {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the map.
* @param map The map.
* @param entryWriterFunc Function that writes the map entry value.
* @param <T> Type of map value.
* @return The updated {@link JsonWriter} object.
*/
public static <T> JsonWriter writeMap(JsonWriter jsonWriter, String fieldName, Map<String, T> map,
BiConsumer<JsonWriter, T> entryWriterFunc) {
if (map == null) {
return jsonWriter.writeNullField(fieldName).flush();
}
jsonWriter.writeStartObject(fieldName);
for (Map.Entry<String, T> entry : map.entrySet()) {
jsonWriter.writeFieldName(entry.getKey());
entryWriterFunc.accept(jsonWriter, entry.getValue());
}
return jsonWriter.writeEndObject();
}
/**
* Handles basic logic for deserializing an object before passing it into the deserialization function.
* <p>
* This will initialize the {@link JsonReader} for object reading and then check if the current token is
* {@link JsonToken
* {@link JsonToken
* starting location to support partial object reads.
* <p>
* Use {@link
*
* @param jsonReader The {@link JsonReader} being read.
* @param deserializationFunc The function that handles deserialization logic, passing the reader and current
* token.
* @param <T> The type of object that is being deserialized.
* @return The deserialized object, or null if the {@link JsonToken
* @throws IllegalStateException If the initial token for reading isn't {@link JsonToken
*/
public static <T> T readObject(JsonReader jsonReader, Function<JsonReader, T> deserializationFunc) {
JsonToken currentToken = jsonReader.currentToken();
if (currentToken == null) {
currentToken = jsonReader.nextToken();
}
if (currentToken == JsonToken.NULL) {
return null;
} else if (currentToken == JsonToken.END_OBJECT || currentToken == JsonToken.FIELD_NAME) {
throw new IllegalStateException("Unexpected token to begin deserialization: " + jsonReader.currentToken());
}
return deserializationFunc.apply(jsonReader);
}
/**
* Handles basic logic for deserializing an array before passing it into the deserialization function.
* <p>
* This will initialize the {@link JsonReader} for array reading and then check if the current token is
* {@link JsonToken
* {@link IllegalStateException}.
* <p>
* Use {@link
*
* @param jsonReader The {@link JsonReader} being read.
* @param deserializationFunc The function that handles deserialization logic.
* @param <T> The type of array element that is being deserialized.
* @return The deserialized array, or null if the {@link JsonToken
* @throws IllegalStateException If the initial token for reading isn't {@link JsonToken
*/
public static <T> List<T> readArray(JsonReader jsonReader, Function<JsonReader, T> deserializationFunc) {
if (jsonReader.currentToken() == null) {
jsonReader.nextToken();
}
if (jsonReader.currentToken() == JsonToken.NULL) {
return null;
} else if (jsonReader.currentToken() != JsonToken.START_ARRAY) {
throw new IllegalStateException("Unexpected token to begin deserialization: " + jsonReader.currentToken());
}
List<T> array = new ArrayList<>();
while (jsonReader.nextToken() != JsonToken.END_ARRAY) {
array.add(deserializationFunc.apply(jsonReader));
}
return array;
}
/**
* Reads the {@link JsonReader} as an untyped object.
* <p>
* The returned object is one of the following:
*
* <ul>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* </ul>
*
* If the {@link JsonReader
* {@link JsonToken
* with the ending of an array or object or with the name of a field.
*
* @param jsonReader The {@link JsonReader} that will be read into an untyped object.
* @return The untyped object based on the description.
* @throws IllegalStateException If the {@link JsonReader
* {@link JsonToken
*/
public static Object readUntypedField(JsonReader jsonReader) {
return readUntypedField(jsonReader, 0);
}
private static Object readUntypedField(JsonReader jsonReader, int depth) {
if (depth >= 1000) {
throw new IllegalStateException("Untyped object exceeded allowed object nested depth of 1000.");
}
JsonToken token = jsonReader.currentToken();
if (token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT || token == JsonToken.FIELD_NAME) {
throw new IllegalStateException("Unexpected token to begin an untyped field: " + token);
}
if (token == JsonToken.NULL) {
return null;
} else if (token == JsonToken.BOOLEAN) {
return jsonReader.getBooleanValue();
} else if (token == JsonToken.NUMBER) {
String numberText = jsonReader.getTextValue();
if (numberText.contains(".")) {
return Double.parseDouble(numberText);
} else {
return Long.parseLong(numberText);
}
} else if (token == JsonToken.STRING) {
return jsonReader.getStringValue();
} else if (token == JsonToken.START_ARRAY) {
List<Object> array = new ArrayList<>();
while (jsonReader.nextToken() != JsonToken.END_ARRAY) {
array.add(readUntypedField(jsonReader, depth + 1));
}
return array;
} else if (token == JsonToken.START_OBJECT) {
Map<String, Object> object = new LinkedHashMap<>();
while (jsonReader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = jsonReader.getFieldName();
jsonReader.nextToken();
Object value = readUntypedField(jsonReader, depth + 1);
object.put(fieldName, value);
}
return object;
}
throw new IllegalStateException("Unknown token type while reading an untyped field: " + token);
}
/**
* Writes the {@code value} as an untyped field to the {@link JsonWriter}.
*
* @param jsonWriter The {@link JsonWriter} that will be written.
* @param value The value to write.
* @return The updated {@code jsonWriter} with the {@code value} written to it.
*/
public static JsonWriter writeUntypedField(JsonWriter jsonWriter, Object value) {
if (value == null) {
return jsonWriter.writeNull().flush();
} else if (value instanceof Short) {
return jsonWriter.writeInt((short) value).flush();
} else if (value instanceof Integer) {
return jsonWriter.writeInt((int) value).flush();
} else if (value instanceof Long) {
return jsonWriter.writeLong((long) value).flush();
} else if (value instanceof Float) {
return jsonWriter.writeFloat((float) value).flush();
} else if (value instanceof Double) {
return jsonWriter.writeDouble((double) value).flush();
} else if (value instanceof Boolean) {
return jsonWriter.writeBoolean((boolean) value).flush();
} else if (value instanceof byte[]) {
return jsonWriter.writeBinary((byte[]) value).flush();
} else if (value instanceof CharSequence) {
return jsonWriter.writeString(String.valueOf(value)).flush();
} else if (value instanceof JsonSerializable<?>) {
return ((JsonSerializable<?>) value).toJson(jsonWriter).flush();
} else if (value.getClass() == Object.class) {
return jsonWriter.writeStartObject().writeEndObject().flush();
} else {
return jsonWriter.writeString(String.valueOf(value)).flush();
}
}
/**
* Gets the nullable JSON property as null if the {@link JsonReader JsonReader's} {@link JsonReader
* is {@link JsonToken
*
* @param jsonReader The {@link JsonReader} being read.
* @param nonNullGetter The non-null getter.
* @param <T> The type of the property.
* @return Either null if the current token is {@link JsonToken
* {@code nonNullGetter}.
*/
public static <T> T getNullableProperty(JsonReader jsonReader, Function<JsonReader, T> nonNullGetter) {
return jsonReader.currentToken() == JsonToken.NULL ? null : nonNullGetter.apply(jsonReader);
}
/**
* Reads and returns the current JSON object the {@link JsonReader} is pointing to. This will mutate the current
* location of {@code jsonReader}.
* <p>
* If the {@code jsonReader} is pointing to {@link JsonToken
* JSON object will be read until completion and returned as a raw JSON string.
*
* @param jsonReader The {@link JsonReader} being read.
* @return The buffered JSON object the {@link JsonReader} was pointing to, or null if it was pointing to
* {@link JsonToken
* @throws IllegalStateException If the {@code jsonReader}'s {@link JsonReader
* one of {@link JsonToken
* {@link JsonToken
*/ | class JsonUtils {
/**
* Serializes an array.
* <p>
* Handles two scenarios for the array:
*
* <ul>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* If a null array should be written as JSON null use
* {@link
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object, or a no-op if {@code array} is null
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, T[] array,
BiConsumer<JsonWriter, T> elementWriterFunc) {
return writeArray(jsonWriter, fieldName, array, false, elementWriterFunc);
}
/**
* Serializes an array.
* <p>
* Handles three scenarios for the array:
*
* <ul>
* <li>null {@code array} writes JSON null, iff {@code writeNull} is true</li>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param writeNull Whether JSON null should be written if {@code array} is null.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object, or a no-op if {@code array} is null and {@code writeNull} is
* false.
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, T[] array, boolean writeNull,
BiConsumer<JsonWriter, T> elementWriterFunc) {
if (array == null) {
return writeNull ? jsonWriter.writeNullField(fieldName).flush() : jsonWriter;
}
jsonWriter.writeStartArray(fieldName);
for (T element : array) {
elementWriterFunc.accept(jsonWriter, element);
}
return jsonWriter.writeEndArray().flush();
}
/**
* Serializes an array.
* <p>
* Handles two scenarios for the array:
*
* <ul>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* If a null array should be written as JSON null use
* {@link
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object, or a no-op if {@code array} is null
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, Iterable<T> array,
BiConsumer<JsonWriter, T> elementWriterFunc) {
return writeArray(jsonWriter, fieldName, array, false, elementWriterFunc);
}
/**
* Serializes an array.
* <p>
* Handles three scenarios for the array:
*
* <ul>
* <li>null {@code array} writes JSON null, iff {@code writeNull} is true</li>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param writeNull Whether JSON null should be written if {@code array} is null.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object, or a no-op if {@code array} is null and {@code writeNull} is
* false.
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, Iterable<T> array,
boolean writeNull, BiConsumer<JsonWriter, T> elementWriterFunc) {
if (array == null) {
return writeNull ? jsonWriter.writeNullField(fieldName).flush() : jsonWriter;
}
jsonWriter.writeStartArray(fieldName);
for (T element : array) {
elementWriterFunc.accept(jsonWriter, element);
}
return jsonWriter.writeEndArray().flush();
}
/**
* Serializes a map.
* <p>
* If the map is null this method is a no-op. Use {@link
* and passed true for {@code writeNull} if JSON null should be written.
*
* @param jsonWriter The {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the map.
* @param map The map.
* @param entryWriterFunc Function that writes the map entry value.
* @param <T> Type of map value.
* @return The updated {@link JsonWriter} object, or a no-op if {@code map} is null
*/
public static <T> JsonWriter writeMap(JsonWriter jsonWriter, String fieldName, Map<String, T> map,
BiConsumer<JsonWriter, T> entryWriterFunc) {
return writeMap(jsonWriter, fieldName, map, false, entryWriterFunc);
}
/**
* Serializes a map.
* <p>
* If {@code map} is null and {@code writeNull} is false this method is effectively a no-op.
*
* @param jsonWriter The {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the map.
* @param map The map.
* @param writeNull Whether JSON null should be written if {@code map} is null.
* @param entryWriterFunc Function that writes the map entry value.
* @param <T> Type of map value.
* @return The updated {@link JsonWriter} object, or a no-op if {@code map} is null and {@code writeNull} is false.
*/
public static <T> JsonWriter writeMap(JsonWriter jsonWriter, String fieldName, Map<String, T> map,
boolean writeNull, BiConsumer<JsonWriter, T> entryWriterFunc) {
if (map == null) {
return writeNull ? jsonWriter.writeNullField(fieldName).flush() : jsonWriter;
}
jsonWriter.writeStartObject(fieldName);
for (Map.Entry<String, T> entry : map.entrySet()) {
jsonWriter.writeFieldName(entry.getKey());
entryWriterFunc.accept(jsonWriter, entry.getValue());
}
return jsonWriter.writeEndObject();
}
/**
* Handles basic logic for deserializing an object before passing it into the deserialization function.
* <p>
* This will initialize the {@link JsonReader} for object reading and then check if the current token is
* {@link JsonToken
* {@link JsonToken
* starting location to support partial object reads.
* <p>
* Use {@link
*
* @param jsonReader The {@link JsonReader} being read.
* @param deserializationFunc The function that handles deserialization logic, passing the reader and current
* token.
* @param <T> The type of object that is being deserialized.
* @return The deserialized object, or null if the {@link JsonToken
* @throws IllegalStateException If the initial token for reading isn't {@link JsonToken
*/
public static <T> T readObject(JsonReader jsonReader, Function<JsonReader, T> deserializationFunc) {
JsonToken currentToken = jsonReader.currentToken();
if (currentToken == null) {
currentToken = jsonReader.nextToken();
}
if (currentToken == JsonToken.NULL) {
return null;
} else if (currentToken == JsonToken.END_OBJECT || currentToken == JsonToken.FIELD_NAME) {
throw new IllegalStateException("Unexpected token to begin deserialization: " + jsonReader.currentToken());
}
return deserializationFunc.apply(jsonReader);
}
/**
* Handles basic logic for deserializing an array before passing it into the deserialization function.
* <p>
* This will initialize the {@link JsonReader} for array reading and then check if the current token is
* {@link JsonToken
* {@link IllegalStateException}.
* <p>
* Use {@link
*
* @param jsonReader The {@link JsonReader} being read.
* @param deserializationFunc The function that handles deserialization logic.
* @param <T> The type of array element that is being deserialized.
* @return The deserialized array, or null if the {@link JsonToken
* @throws IllegalStateException If the initial token for reading isn't {@link JsonToken
*/
public static <T> List<T> readArray(JsonReader jsonReader, Function<JsonReader, T> deserializationFunc) {
if (jsonReader.currentToken() == null) {
jsonReader.nextToken();
}
if (jsonReader.currentToken() == JsonToken.NULL) {
return null;
} else if (jsonReader.currentToken() != JsonToken.START_ARRAY) {
throw new IllegalStateException("Unexpected token to begin deserialization: " + jsonReader.currentToken());
}
List<T> array = new ArrayList<>();
while (jsonReader.nextToken() != JsonToken.END_ARRAY) {
array.add(deserializationFunc.apply(jsonReader));
}
return array;
}
/**
* Reads the {@link JsonReader} as an untyped object.
* <p>
* The returned object is one of the following:
*
* <ul>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* </ul>
*
* If the {@link JsonReader
* {@link JsonToken
* with the ending of an array or object or with the name of a field.
*
* @param jsonReader The {@link JsonReader} that will be read into an untyped object.
* @return The untyped object based on the description.
* @throws IllegalStateException If the {@link JsonReader
* {@link JsonToken
*/
public static Object readUntypedField(JsonReader jsonReader) {
return readUntypedField(jsonReader, 0);
}
private static Object readUntypedField(JsonReader jsonReader, int depth) {
if (depth >= 1000) {
throw new IllegalStateException("Untyped object exceeded allowed object nested depth of 1000.");
}
JsonToken token = jsonReader.currentToken();
if (token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT || token == JsonToken.FIELD_NAME) {
throw new IllegalStateException("Unexpected token to begin an untyped field: " + token);
}
if (token == JsonToken.NULL) {
return null;
} else if (token == JsonToken.BOOLEAN) {
return jsonReader.getBooleanValue();
} else if (token == JsonToken.NUMBER) {
String numberText = jsonReader.getTextValue();
if (numberText.contains(".")) {
return Double.parseDouble(numberText);
} else {
return Long.parseLong(numberText);
}
} else if (token == JsonToken.STRING) {
return jsonReader.getStringValue();
} else if (token == JsonToken.START_ARRAY) {
List<Object> array = new ArrayList<>();
while (jsonReader.nextToken() != JsonToken.END_ARRAY) {
array.add(readUntypedField(jsonReader, depth + 1));
}
return array;
} else if (token == JsonToken.START_OBJECT) {
Map<String, Object> object = new LinkedHashMap<>();
while (jsonReader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = jsonReader.getFieldName();
jsonReader.nextToken();
Object value = readUntypedField(jsonReader, depth + 1);
object.put(fieldName, value);
}
return object;
}
throw new IllegalStateException("Unknown token type while reading an untyped field: " + token);
}
/**
* Writes the {@code value} as an untyped field to the {@link JsonWriter}.
*
* @param jsonWriter The {@link JsonWriter} that will be written.
* @param value The value to write.
* @return The updated {@code jsonWriter} with the {@code value} written to it.
*/
public static JsonWriter writeUntypedField(JsonWriter jsonWriter, Object value) {
if (value == null) {
return jsonWriter.writeNull().flush();
} else if (value instanceof Short) {
return jsonWriter.writeInt((short) value).flush();
} else if (value instanceof Integer) {
return jsonWriter.writeInt((int) value).flush();
} else if (value instanceof Long) {
return jsonWriter.writeLong((long) value).flush();
} else if (value instanceof Float) {
return jsonWriter.writeFloat((float) value).flush();
} else if (value instanceof Double) {
return jsonWriter.writeDouble((double) value).flush();
} else if (value instanceof Boolean) {
return jsonWriter.writeBoolean((boolean) value).flush();
} else if (value instanceof byte[]) {
return jsonWriter.writeBinary((byte[]) value).flush();
} else if (value instanceof CharSequence) {
return jsonWriter.writeString(String.valueOf(value)).flush();
} else if (value instanceof Character) {
return jsonWriter.writeString(String.valueOf(((Character) value).charValue())).flush();
} else if (value instanceof DateTimeRfc1123) {
return jsonWriter.writeString(value.toString()).flush();
} else if (value instanceof OffsetDateTime) {
return jsonWriter.writeString(value.toString()).flush();
} else if (value instanceof LocalDate) {
return jsonWriter.writeString(value.toString()).flush();
} else if (value instanceof Duration) {
return jsonWriter.writeString(value.toString()).flush();
} else if (value instanceof JsonSerializable<?>) {
return ((JsonSerializable<?>) value).toJson(jsonWriter).flush();
} else if (value.getClass() == Object.class) {
return jsonWriter.writeStartObject().writeEndObject().flush();
} else {
return jsonWriter.writeString(String.valueOf(value)).flush();
}
}
/**
* Gets the nullable JSON property as null if the {@link JsonReader JsonReader's} {@link JsonReader
* is {@link JsonToken
*
* @param jsonReader The {@link JsonReader} being read.
* @param nonNullGetter The non-null getter.
* @param <T> The type of the property.
* @return Either null if the current token is {@link JsonToken
* {@code nonNullGetter}.
*/
public static <T> T getNullableProperty(JsonReader jsonReader, Function<JsonReader, T> nonNullGetter) {
    // JSON null maps straight to a Java null without invoking the getter.
    if (jsonReader.currentToken() == JsonToken.NULL) {
        return null;
    }
    return nonNullGetter.apply(jsonReader);
}
/**
 * Reads and returns the current JSON object the {@link JsonReader} is pointing to. This will mutate the current
 * location of {@code jsonReader}.
 * <p>
 * If the {@code jsonReader} is pointing to {@link JsonToken#START_OBJECT} or {@link JsonToken#FIELD_NAME} the entire
 * JSON object will be read until completion and returned as a raw JSON string.
 *
 * @param jsonReader The {@link JsonReader} being read.
 * @return The buffered JSON object the {@link JsonReader} was pointing to, or null if it was pointing to
 * {@link JsonToken#NULL}.
 * @throws IllegalStateException If the {@code jsonReader}'s {@link JsonReader#currentToken()} isn't
 * one of {@link JsonToken#START_OBJECT}, {@link JsonToken#FIELD_NAME}, or
 * {@link JsonToken#NULL}.
 */
nit, it helps to move them to a function in the base class, so the code can be shared by all tests. | public void testGetUserTests() throws Exception {
String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERID", "emily-java-sdk-tests");
Response<BinaryData> ledgerIdentityWithResponse = confidentialLedgerIdentityClient
.getLedgerIdentityWithResponse(ledgerId, null);
BinaryData identityResponse = ledgerIdentityWithResponse.getValue();
ObjectMapper mapper = new ObjectMapper();
JsonNode jsonNode = mapper.readTree(identityResponse.toBytes());
String ledgerTslCertificate = jsonNode.get("ledgerTlsCertificate").asText();
SslContext sslContext = SslContextBuilder.forClient()
.trustManager(new ByteArrayInputStream(ledgerTslCertificate.getBytes(StandardCharsets.UTF_8))).build();
reactor.netty.http.client.HttpClient reactorClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
HttpClient httpClient = new NettyAsyncHttpClientBuilder(reactorClient).wiretap(true).build();
if (getTestMode() == TestMode.PLAYBACK) {
confidentialLedgerClientBuilder
.httpClient(interceptorManager.getPlaybackClient())
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
confidentialLedgerClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.httpClient(httpClient)
.credential(new DefaultAzureCredentialBuilder().build());
} else if (getTestMode() == TestMode.LIVE) {
confidentialLedgerClientBuilder
.credential(new DefaultAzureCredentialBuilder().build())
.httpClient(httpClient);
}
ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient();
String userAad = Configuration.getGlobalConfiguration().get("USERAAD", "ec667af1-0642-45f0-be8a-b76758a35dde");
RequestOptions requestOptions = new RequestOptions();
Response<BinaryData> response = confidentialLedgerClient.getUserWithResponse(userAad, requestOptions);
BinaryData parsedResponse = response.getValue();
Assertions.assertEquals(200, response.getStatusCode());
ObjectMapper objectMapper = new ObjectMapper();
JsonNode responseBodyJson = null;
try {
responseBodyJson = objectMapper.readTree(parsedResponse.toBytes());
} catch (IOException e) {
e.printStackTrace();
Assertions.assertTrue(false);
}
Assertions.assertEquals(responseBodyJson.get("assignedRole").asText(), "Administrator");
Assertions.assertEquals(responseBodyJson.get("userId").asText(), userAad);
} | ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient(); | public void testGetUserTests() throws Exception {
String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERID", "emily-java-sdk-tests");
Response<BinaryData> ledgerIdentityWithResponse = confidentialLedgerIdentityClient
.getLedgerIdentityWithResponse(ledgerId, null);
BinaryData identityResponse = ledgerIdentityWithResponse.getValue();
ObjectMapper mapper = new ObjectMapper();
JsonNode jsonNode = mapper.readTree(identityResponse.toBytes());
String ledgerTslCertificate = jsonNode.get("ledgerTlsCertificate").asText();
SslContext sslContext = SslContextBuilder.forClient()
.trustManager(new ByteArrayInputStream(ledgerTslCertificate.getBytes(StandardCharsets.UTF_8))).build();
reactor.netty.http.client.HttpClient reactorClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
HttpClient httpClient = new NettyAsyncHttpClientBuilder(reactorClient).wiretap(true).build();
if (getTestMode() == TestMode.PLAYBACK) {
confidentialLedgerClientBuilder
.httpClient(interceptorManager.getPlaybackClient())
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
confidentialLedgerClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.httpClient(httpClient)
.credential(new DefaultAzureCredentialBuilder().build());
} else if (getTestMode() == TestMode.LIVE) {
confidentialLedgerClientBuilder
.credential(new DefaultAzureCredentialBuilder().build())
.httpClient(httpClient);
}
ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient();
String userAad = Configuration.getGlobalConfiguration().get("USERAAD", "ec667af1-0642-45f0-be8a-b76758a35dde");
RequestOptions requestOptions = new RequestOptions();
Response<BinaryData> response = confidentialLedgerClient.getUserWithResponse(userAad, requestOptions);
BinaryData parsedResponse = response.getValue();
Assertions.assertEquals(200, response.getStatusCode());
ObjectMapper objectMapper = new ObjectMapper();
JsonNode responseBodyJson = null;
try {
responseBodyJson = objectMapper.readTree(parsedResponse.toBytes());
} catch (IOException e) {
e.printStackTrace();
Assertions.assertTrue(false);
}
Assertions.assertEquals(responseBodyJson.get("assignedRole").asText(), "Administrator");
Assertions.assertEquals(responseBodyJson.get("userId").asText(), userAad);
} | class UserTests extends ConfidentialLedgerClientTestBase {
@Test
} | class UserTests extends ConfidentialLedgerClientTestBase {
@Test
} |
I will try to move it to the base class. I tried earlier, with no success, but it's worth another go! :) | public void testGetUserTests() throws Exception {
String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERID", "emily-java-sdk-tests");
Response<BinaryData> ledgerIdentityWithResponse = confidentialLedgerIdentityClient
.getLedgerIdentityWithResponse(ledgerId, null);
BinaryData identityResponse = ledgerIdentityWithResponse.getValue();
ObjectMapper mapper = new ObjectMapper();
JsonNode jsonNode = mapper.readTree(identityResponse.toBytes());
String ledgerTslCertificate = jsonNode.get("ledgerTlsCertificate").asText();
SslContext sslContext = SslContextBuilder.forClient()
.trustManager(new ByteArrayInputStream(ledgerTslCertificate.getBytes(StandardCharsets.UTF_8))).build();
reactor.netty.http.client.HttpClient reactorClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
HttpClient httpClient = new NettyAsyncHttpClientBuilder(reactorClient).wiretap(true).build();
if (getTestMode() == TestMode.PLAYBACK) {
confidentialLedgerClientBuilder
.httpClient(interceptorManager.getPlaybackClient())
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
confidentialLedgerClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.httpClient(httpClient)
.credential(new DefaultAzureCredentialBuilder().build());
} else if (getTestMode() == TestMode.LIVE) {
confidentialLedgerClientBuilder
.credential(new DefaultAzureCredentialBuilder().build())
.httpClient(httpClient);
}
ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient();
String userAad = Configuration.getGlobalConfiguration().get("USERAAD", "ec667af1-0642-45f0-be8a-b76758a35dde");
RequestOptions requestOptions = new RequestOptions();
Response<BinaryData> response = confidentialLedgerClient.getUserWithResponse(userAad, requestOptions);
BinaryData parsedResponse = response.getValue();
Assertions.assertEquals(200, response.getStatusCode());
ObjectMapper objectMapper = new ObjectMapper();
JsonNode responseBodyJson = null;
try {
responseBodyJson = objectMapper.readTree(parsedResponse.toBytes());
} catch (IOException e) {
e.printStackTrace();
Assertions.assertTrue(false);
}
Assertions.assertEquals(responseBodyJson.get("assignedRole").asText(), "Administrator");
Assertions.assertEquals(responseBodyJson.get("userId").asText(), userAad);
} | ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient(); | public void testGetUserTests() throws Exception {
String ledgerId = Configuration.getGlobalConfiguration().get("LEDGERID", "emily-java-sdk-tests");
Response<BinaryData> ledgerIdentityWithResponse = confidentialLedgerIdentityClient
.getLedgerIdentityWithResponse(ledgerId, null);
BinaryData identityResponse = ledgerIdentityWithResponse.getValue();
ObjectMapper mapper = new ObjectMapper();
JsonNode jsonNode = mapper.readTree(identityResponse.toBytes());
String ledgerTslCertificate = jsonNode.get("ledgerTlsCertificate").asText();
SslContext sslContext = SslContextBuilder.forClient()
.trustManager(new ByteArrayInputStream(ledgerTslCertificate.getBytes(StandardCharsets.UTF_8))).build();
reactor.netty.http.client.HttpClient reactorClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
HttpClient httpClient = new NettyAsyncHttpClientBuilder(reactorClient).wiretap(true).build();
if (getTestMode() == TestMode.PLAYBACK) {
confidentialLedgerClientBuilder
.httpClient(interceptorManager.getPlaybackClient())
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
confidentialLedgerClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.httpClient(httpClient)
.credential(new DefaultAzureCredentialBuilder().build());
} else if (getTestMode() == TestMode.LIVE) {
confidentialLedgerClientBuilder
.credential(new DefaultAzureCredentialBuilder().build())
.httpClient(httpClient);
}
ConfidentialLedgerClient confidentialLedgerClient = confidentialLedgerClientBuilder.buildClient();
String userAad = Configuration.getGlobalConfiguration().get("USERAAD", "ec667af1-0642-45f0-be8a-b76758a35dde");
RequestOptions requestOptions = new RequestOptions();
Response<BinaryData> response = confidentialLedgerClient.getUserWithResponse(userAad, requestOptions);
BinaryData parsedResponse = response.getValue();
Assertions.assertEquals(200, response.getStatusCode());
ObjectMapper objectMapper = new ObjectMapper();
JsonNode responseBodyJson = null;
try {
responseBodyJson = objectMapper.readTree(parsedResponse.toBytes());
} catch (IOException e) {
e.printStackTrace();
Assertions.assertTrue(false);
}
Assertions.assertEquals(responseBodyJson.get("assignedRole").asText(), "Administrator");
Assertions.assertEquals(responseBodyJson.get("userId").asText(), userAad);
} | class UserTests extends ConfidentialLedgerClientTestBase {
@Test
} | class UserTests extends ConfidentialLedgerClientTestBase {
@Test
} |
I am curious why we have an assertion here if this isn't a test. | public static void main(String[] args) {
ConfidentialLedgerClient confidentialLedgerClient =
new ConfidentialLedgerClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.ledgerUri("https:
.buildClient();
RequestOptions requestOptions = new RequestOptions();
String aadObjectId = "<YOUR AAD ID>";
Response<Void> response = confidentialLedgerClient.deleteUserWithResponse(aadObjectId, requestOptions);
Assertions.assertEquals(response.getStatusCode(), 204);
} | Assertions.assertEquals(response.getStatusCode(), 204); | public static void main(String[] args) {
ConfidentialLedgerClient confidentialLedgerClient =
new ConfidentialLedgerClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.ledgerUri("https:
.buildClient();
RequestOptions requestOptions = new RequestOptions();
String aadObjectId = "<YOUR AAD ID>";
Response<Void> response = confidentialLedgerClient.deleteUserWithResponse(aadObjectId, requestOptions);
} | class DeleteUser {
} | class DeleteUser {
} |
I noticed a lot of these samples use the Response overload — is this what we expect customers to use generally? It would be helpful to show what to do with this BinaryData (in all these samples), since I find customers often copy and paste sample code as-is. | public static void main(String[] args) {
ConfidentialLedgerClient confidentialLedgerClient =
new ConfidentialLedgerClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.ledgerUri("https:
.buildClient();
RequestOptions requestOptions = new RequestOptions();
Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions);
} | Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions); | public static void main(String[] args) {
ConfidentialLedgerClient confidentialLedgerClient =
new ConfidentialLedgerClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.ledgerUri("https:
.buildClient();
RequestOptions requestOptions = new RequestOptions();
Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions);
} | class GetCollectionIds {
} | class GetCollectionIds {
} |
The service might look like ``` interface Service { void doFoo(); Mono<Void> doFooAsync(); } ``` IMO, trying to detect how the interface look like to spare that allocation is not worth it. (Especially that these two may merge eventually). | private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
} | this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser); | private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
} | class RestProxy implements InvocationHandler {
private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class);
private final SwaggerInterfaceParser interfaceParser;
private AsyncRestProxy asyncRestProxy;
private SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
try {
final SwaggerMethodParser methodParser = getMethodParser(method);
HttpRequest request;
boolean isReactive = isReactive(methodParser.getReturnType());
if (isReactive) {
request = asyncRestProxy.createHttpRequest(methodParser, args);
} else {
request = syncRestProxy.createHttpRequest(methodParser, args);
}
Context context = methodParser.setContext(args);
RequestOptions options = methodParser.setRequestOptions(args);
context = RestProxyUtils.mergeRequestOptionsContext(context, options);
context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
.addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(e));
}
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
 * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
 * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
I added a more robust example in PostLedger and referenced it in the samples README | public static void main(String[] args) {
ConfidentialLedgerClient confidentialLedgerClient =
new ConfidentialLedgerClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.ledgerUri("https:
.buildClient();
RequestOptions requestOptions = new RequestOptions();
Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions);
} | Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions); | public static void main(String[] args) {
ConfidentialLedgerClient confidentialLedgerClient =
new ConfidentialLedgerClientBuilder()
.credential(new DefaultAzureCredentialBuilder().build())
.ledgerUri("https:
.buildClient();
RequestOptions requestOptions = new RequestOptions();
Response<BinaryData> response = confidentialLedgerClient.listCollectionsWithResponse(requestOptions);
} | class GetCollectionIds {
} | class GetCollectionIds {
} |
How about using a constant for `Duration.ofSeconds(30)`? | private OAuthBearerToken getOAuthBearerToken() {
if (accessToken == null || accessToken.isExpired()) {
TokenRequestContext request = new TokenRequestContext();
request.addScopes(tokenAudience);
request.setTenantId(properties.getProfile().getTenantId());
AccessToken accessToken = credential.getToken(request).block(Duration.ofSeconds(30));
if (accessToken != null) {
this.accessToken = new AzureOAuthBearerToken(accessToken);
}
}
return accessToken;
} | AccessToken accessToken = credential.getToken(request).block(Duration.ofSeconds(30)); | private OAuthBearerToken getOAuthBearerToken() {
if (accessToken == null || accessToken.isExpired()) {
TokenRequestContext request = new TokenRequestContext();
request.addScopes(tokenAudience);
request.setTenantId(properties.getProfile().getTenantId());
AccessToken accessToken = credential.getToken(request).block(ACCESS_TOKEN_REQUEST_BLOCK_TIME);
if (accessToken != null) {
this.accessToken = new AzureOAuthBearerToken(accessToken);
}
}
return accessToken;
} | class KafkaOAuth2AuthenticateCallbackHandler implements AuthenticateCallbackHandler {
private final AzureThirdPartyServiceProperties properties = new AzureThirdPartyServiceProperties();
private final DefaultAzureCredentialBuilderFactory defaultAzureCredentialBuilderFactory =
new DefaultAzureCredentialBuilderFactory(properties);
private TokenCredential credential;
private AzureOAuthBearerToken accessToken;
private String tokenAudience;
private final AzureTokenCredentialResolver tokenCredentialResolver = new AzureTokenCredentialResolver();
@Override
public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) {
String bootstrapServer = Arrays.asList(configs.get(BOOTSTRAP_SERVERS_CONFIG)).get(0).toString();
bootstrapServer = bootstrapServer.replaceAll("\\[|\\]", "");
URI uri = URI.create("https:
this.tokenAudience = uri.getScheme() + ":
credential = (TokenCredential) configs.get(AZURE_TOKEN_CREDENTIAL);
AzureIdentityCustomConfigUtils.convertConfigMapToAzureProperties(configs, properties);
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback) {
OAuthBearerTokenCallback oauthCallback = (OAuthBearerTokenCallback) callback;
credential = getTokenCredential();
OAuthBearerToken token = getOAuthBearerToken();
oauthCallback.token(token);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
private TokenCredential getTokenCredential() {
if (credential == null) {
credential = tokenCredentialResolver.resolve(properties);
if (credential == null) {
credential = defaultAzureCredentialBuilderFactory.build().build();
}
}
return credential;
}
@Override
public void close() {
}
} | class KafkaOAuth2AuthenticateCallbackHandler implements AuthenticateCallbackHandler {
private static final Duration ACCESS_TOKEN_REQUEST_BLOCK_TIME = Duration.ofSeconds(30);
private final AzureKafkaProperties properties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private TokenCredential credential;
private AzureOAuthBearerToken accessToken;
private String tokenAudience;
public KafkaOAuth2AuthenticateCallbackHandler() {
this(new AzureKafkaProperties(), new AzureTokenCredentialResolver());
}
public KafkaOAuth2AuthenticateCallbackHandler(AzureKafkaProperties properties, AzureTokenCredentialResolver tokenCredentialResolver) {
this.properties = properties;
this.tokenCredentialResolver = tokenCredentialResolver;
}
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) {
List<String> bootstrapServers = (List<String>) configs.get(BOOTSTRAP_SERVERS_CONFIG);
if (bootstrapServers == null || bootstrapServers.size() != 1) {
throw new IllegalArgumentException("Invalid bootstrap servers configured for Azure Event Hubs for Kafka! Must supply exactly 1 non-null bootstrap server configuration,"
+ " with the format as {YOUR.EVENTHUBS.FQDN}:9093.");
}
String bootstrapServer = bootstrapServers.get(0);
if (!bootstrapServer.endsWith(":9093")) {
throw new IllegalArgumentException("Invalid bootstrap server configured for Azure Event Hubs for Kafka! The format should be {YOUR.EVENTHUBS.FQDN}:9093.");
}
URI uri = URI.create("https:
this.tokenAudience = uri.getScheme() + ":
credential = (TokenCredential) configs.get(AZURE_TOKEN_CREDENTIAL);
AzureKafkaPropertiesUtils.convertConfigMapToAzureProperties(configs, properties);
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback) {
OAuthBearerTokenCallback oauthCallback = (OAuthBearerTokenCallback) callback;
credential = getTokenCredential();
OAuthBearerToken token = getOAuthBearerToken();
oauthCallback.token(token);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
private TokenCredential getTokenCredential() {
if (credential == null) {
credential = tokenCredentialResolver.resolve(properties);
if (credential == null) {
credential = new DefaultAzureCredentialBuilderFactory(properties).build().build();
}
}
return credential;
}
@Override
public void close() {
}
} |
> please use OAuth2 instead — how? I suggest the warning message should be specific and actionable. | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof KafkaProperties) {
LOGGER.warn("Autoconfiguration for Event Hubs for Kafka on connection string/Azure Resource Manager"
+ " has been deprecated, please use OAuth2 instead.");
KafkaProperties kafkaProperties = (KafkaProperties) bean;
String connectionString = connectionStringProvider.getConnectionString();
String bootstrapServer = new EventHubsConnectionString(connectionString).getFullyQualifiedNamespace() + ":9093";
kafkaProperties.setBootstrapServers(new ArrayList<>(Collections.singletonList(bootstrapServer)));
kafkaProperties.getProperties().put(SECURITY_PROTOCOL_CONFIG, SASL_SSL.name());
kafkaProperties.getProperties().put(SASL_MECHANISM, "PLAIN");
kafkaProperties.getProperties().put(SASL_JAAS_CONFIG, String.format(SASL_CONFIG_VALUE,
connectionString, System.getProperty("line.separator")));
}
return bean;
} | + " has been deprecated, please use OAuth2 instead."); | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof KafkaProperties) {
LOGGER.warn("Autoconfiguration for Event Hubs for Kafka on connection string/Azure Resource Manager"
+ " has been deprecated, please migrate to AzureEventHubsKafkaOAuth2AutoConfiguration for OAuth2 authentication with Azure Identity credentials."
+ " To leverage the OAuth2 authentication, you can delete all your Event Hubs for Kafka credential configurations, and configure Kafka bootstrap servers"
+ " instead, which can be set as spring.kafka.boostrap-servers=EventHubsNamespacesFQDN:9093.");
KafkaProperties kafkaProperties = (KafkaProperties) bean;
String connectionString = connectionStringProvider.getConnectionString();
String bootstrapServer = new EventHubsConnectionString(connectionString).getFullyQualifiedNamespace() + ":9093";
kafkaProperties.setBootstrapServers(new ArrayList<>(Collections.singletonList(bootstrapServer)));
kafkaProperties.getProperties().put(SECURITY_PROTOCOL_CONFIG, SASL_SSL.name());
kafkaProperties.getProperties().put(SASL_MECHANISM, "PLAIN");
kafkaProperties.getProperties().put(SASL_JAAS_CONFIG, String.format(SASL_CONFIG_VALUE,
connectionString, System.getProperty("line.separator")));
}
return bean;
} | class KafkaPropertiesBeanPostProcessor implements BeanPostProcessor {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaPropertiesBeanPostProcessor.class);
private static final String SASL_CONFIG_VALUE = "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$ConnectionString\" password=\"%s\";%s";
private final ServiceConnectionStringProvider<AzureServiceType.EventHubs> connectionStringProvider;
KafkaPropertiesBeanPostProcessor(ServiceConnectionStringProvider<AzureServiceType.EventHubs> connectionStringProvider) {
this.connectionStringProvider = connectionStringProvider;
}
@Override
} | class KafkaPropertiesBeanPostProcessor implements BeanPostProcessor {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaPropertiesBeanPostProcessor.class);
private static final String SASL_CONFIG_VALUE = "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$ConnectionString\" password=\"%s\";%s";
private final ServiceConnectionStringProvider<AzureServiceType.EventHubs> connectionStringProvider;
KafkaPropertiesBeanPostProcessor(ServiceConnectionStringProvider<AzureServiceType.EventHubs> connectionStringProvider) {
this.connectionStringProvider = connectionStringProvider;
}
@Override
} |
This seems like a duplicate of what we have in `azureOAuth2KafkaProducerFactoryCustomizer`? | DefaultKafkaConsumerFactoryCustomizer azureOAuth2KafkaConsumerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> consumerProperties = kafkaProperties.buildConsumerProperties();
if (needConfigureSaslOAuth(consumerProperties)) {
AzureKafkaProperties azureKafkaConsumerProperties = buildAzureProperties(consumerProperties,
azureGlobalProperties);
updateConfigs.put(AZURE_TOKEN_CREDENTIAL, buildCredentialFromProperties(azureKafkaConsumerProperties));
updateConfigs.putAll(KAFKA_OAUTH_CONFIGS);
}
return factory -> factory.updateConfigs(updateConfigs);
} | } | DefaultKafkaConsumerFactoryCustomizer azureOAuth2KafkaConsumerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> consumerProperties = kafkaProperties.buildConsumerProperties();
configureOAuth2Properties(updateConfigs, consumerProperties);
return factory -> factory.updateConfigs(updateConfigs);
} | class AzureEventHubsKafkaOAuth2AutoConfiguration {
private final KafkaProperties kafkaProperties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private final AzureGlobalProperties azureGlobalProperties;
private final TokenCredential defaultTokenCredential;
AzureEventHubsKafkaOAuth2AutoConfiguration(KafkaProperties kafkaProperties,
AzureTokenCredentialResolver resolver,
@Qualifier(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME) TokenCredential defaultTokenCredential,
AzureGlobalProperties azureGlobalProperties) {
this.kafkaProperties = kafkaProperties;
this.tokenCredentialResolver = resolver;
this.defaultTokenCredential = defaultTokenCredential;
this.azureGlobalProperties = azureGlobalProperties;
}
@Bean
@Bean
DefaultKafkaProducerFactoryCustomizer azureOAuth2KafkaProducerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> producerProperties = kafkaProperties.buildProducerProperties();
if (needConfigureSaslOAuth(producerProperties)) {
AzureKafkaProperties azureKafkaProducerProperties = buildAzureProperties(producerProperties,
azureGlobalProperties);
updateConfigs.put(AZURE_TOKEN_CREDENTIAL, buildCredentialFromProperties(azureKafkaProducerProperties));
updateConfigs.putAll(KAFKA_OAUTH_CONFIGS);
}
return factory -> factory.updateConfigs(updateConfigs);
}
private TokenCredential buildCredentialFromProperties(AzureKafkaProperties azureKafkaConsumerProperties) {
TokenCredential tokenCredential = tokenCredentialResolver.resolve(azureKafkaConsumerProperties);
return tokenCredential == null ? defaultTokenCredential : tokenCredential;
}
@ConditionalOnClass(KafkaMessageChannelBinder.class)
@Configuration(proxyBeanMethods = false)
static class AzureKafkaSpringCloudStreamConfiguration {
@Bean
KafkaBinderConfigurationPropertiesBeanPostProcessor kafkaBinderConfigurationPropertiesBeanPostProcessor(
AzureGlobalProperties azureGlobalProperties) {
return new KafkaBinderConfigurationPropertiesBeanPostProcessor(azureGlobalProperties);
}
}
} | class AzureEventHubsKafkaOAuth2AutoConfiguration {
private final KafkaProperties kafkaProperties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private final AzureGlobalProperties azureGlobalProperties;
private final TokenCredential defaultTokenCredential;
AzureEventHubsKafkaOAuth2AutoConfiguration(KafkaProperties kafkaProperties,
AzureTokenCredentialResolver resolver,
@Qualifier(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME) TokenCredential defaultTokenCredential,
AzureGlobalProperties azureGlobalProperties) {
this.kafkaProperties = kafkaProperties;
this.tokenCredentialResolver = resolver;
this.defaultTokenCredential = defaultTokenCredential;
this.azureGlobalProperties = azureGlobalProperties;
configureKafkaUserAgent();
}
@Bean
@Bean
DefaultKafkaProducerFactoryCustomizer azureOAuth2KafkaProducerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> producerProperties = kafkaProperties.buildProducerProperties();
configureOAuth2Properties(updateConfigs, producerProperties);
return factory -> factory.updateConfigs(updateConfigs);
}
private void configureOAuth2Properties(Map<String, Object> updateConfigs, Map<String, Object> sourceKafkaProperties) {
if (needConfigureSaslOAuth(sourceKafkaProperties)) {
AzureKafkaProperties azureKafkaProperties = buildAzureProperties(sourceKafkaProperties,
azureGlobalProperties);
updateConfigs.put(AZURE_TOKEN_CREDENTIAL, resolveSpringCloudAzureTokenCredential(azureKafkaProperties));
updateConfigs.putAll(KAFKA_OAUTH_CONFIGS);
logConfigureOAuthProperties();
}
}
private TokenCredential resolveSpringCloudAzureTokenCredential(AzureKafkaProperties azureKafkaProperties) {
TokenCredential tokenCredential = tokenCredentialResolver.resolve(azureKafkaProperties);
return tokenCredential == null ? defaultTokenCredential : tokenCredential;
}
private void configureKafkaUserAgent() {
Method dataMethod = ReflectionUtils.findMethod(ApiVersionsRequest.class, "data");
if (dataMethod != null) {
ApiVersionsRequest apiVersionsRequest = new ApiVersionsRequest.Builder().build();
ApiVersionsRequestData apiVersionsRequestData = (ApiVersionsRequestData) ReflectionUtils.invokeMethod(dataMethod, apiVersionsRequest);
if (apiVersionsRequestData != null) {
apiVersionsRequestData.setClientSoftwareName(apiVersionsRequestData.clientSoftwareName()
+ "/" + AZURE_SPRING_EVENT_HUBS_KAFKA_OAUTH);
apiVersionsRequestData.setClientSoftwareVersion(VERSION);
}
}
}
} |
I think this is better | private OAuthBearerToken getOAuthBearerToken() {
if (accessToken == null || accessToken.isExpired()) {
TokenRequestContext request = new TokenRequestContext();
request.addScopes(tokenAudience);
request.setTenantId(properties.getProfile().getTenantId());
AccessToken accessToken = credential.getToken(request).block(Duration.ofSeconds(30));
if (accessToken != null) {
this.accessToken = new AzureOAuthBearerToken(accessToken);
}
}
return accessToken;
} | AccessToken accessToken = credential.getToken(request).block(Duration.ofSeconds(30)); | private OAuthBearerToken getOAuthBearerToken() {
if (accessToken == null || accessToken.isExpired()) {
TokenRequestContext request = new TokenRequestContext();
request.addScopes(tokenAudience);
request.setTenantId(properties.getProfile().getTenantId());
AccessToken accessToken = credential.getToken(request).block(ACCESS_TOKEN_REQUEST_BLOCK_TIME);
if (accessToken != null) {
this.accessToken = new AzureOAuthBearerToken(accessToken);
}
}
return accessToken;
} | class KafkaOAuth2AuthenticateCallbackHandler implements AuthenticateCallbackHandler {
private final AzureThirdPartyServiceProperties properties = new AzureThirdPartyServiceProperties();
private final DefaultAzureCredentialBuilderFactory defaultAzureCredentialBuilderFactory =
new DefaultAzureCredentialBuilderFactory(properties);
private TokenCredential credential;
private AzureOAuthBearerToken accessToken;
private String tokenAudience;
private final AzureTokenCredentialResolver tokenCredentialResolver = new AzureTokenCredentialResolver();
@Override
public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) {
String bootstrapServer = Arrays.asList(configs.get(BOOTSTRAP_SERVERS_CONFIG)).get(0).toString();
bootstrapServer = bootstrapServer.replaceAll("\\[|\\]", "");
URI uri = URI.create("https:
this.tokenAudience = uri.getScheme() + ":
credential = (TokenCredential) configs.get(AZURE_TOKEN_CREDENTIAL);
AzureIdentityCustomConfigUtils.convertConfigMapToAzureProperties(configs, properties);
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback) {
OAuthBearerTokenCallback oauthCallback = (OAuthBearerTokenCallback) callback;
credential = getTokenCredential();
OAuthBearerToken token = getOAuthBearerToken();
oauthCallback.token(token);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
private TokenCredential getTokenCredential() {
if (credential == null) {
credential = tokenCredentialResolver.resolve(properties);
if (credential == null) {
credential = defaultAzureCredentialBuilderFactory.build().build();
}
}
return credential;
}
@Override
public void close() {
}
} | class KafkaOAuth2AuthenticateCallbackHandler implements AuthenticateCallbackHandler {
private static final Duration ACCESS_TOKEN_REQUEST_BLOCK_TIME = Duration.ofSeconds(30);
private final AzureKafkaProperties properties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private TokenCredential credential;
private AzureOAuthBearerToken accessToken;
private String tokenAudience;
public KafkaOAuth2AuthenticateCallbackHandler() {
this(new AzureKafkaProperties(), new AzureTokenCredentialResolver());
}
public KafkaOAuth2AuthenticateCallbackHandler(AzureKafkaProperties properties, AzureTokenCredentialResolver tokenCredentialResolver) {
this.properties = properties;
this.tokenCredentialResolver = tokenCredentialResolver;
}
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) {
List<String> bootstrapServers = (List<String>) configs.get(BOOTSTRAP_SERVERS_CONFIG);
if (bootstrapServers == null || bootstrapServers.size() != 1) {
throw new IllegalArgumentException("Invalid bootstrap servers configured for Azure Event Hubs for Kafka! Must supply exactly 1 non-null bootstrap server configuration,"
+ " with the format as {YOUR.EVENTHUBS.FQDN}:9093.");
}
String bootstrapServer = bootstrapServers.get(0);
if (!bootstrapServer.endsWith(":9093")) {
throw new IllegalArgumentException("Invalid bootstrap server configured for Azure Event Hubs for Kafka! The format should be {YOUR.EVENTHUBS.FQDN}:9093.");
}
URI uri = URI.create("https:
this.tokenAudience = uri.getScheme() + ":
credential = (TokenCredential) configs.get(AZURE_TOKEN_CREDENTIAL);
AzureKafkaPropertiesUtils.convertConfigMapToAzureProperties(configs, properties);
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback) {
OAuthBearerTokenCallback oauthCallback = (OAuthBearerTokenCallback) callback;
credential = getTokenCredential();
OAuthBearerToken token = getOAuthBearerToken();
oauthCallback.token(token);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
private TokenCredential getTokenCredential() {
if (credential == null) {
credential = tokenCredentialResolver.resolve(properties);
if (credential == null) {
credential = new DefaultAzureCredentialBuilderFactory(properties).build().build();
}
}
return credential;
}
@Override
public void close() {
}
} |
yeah, will extract to a method | DefaultKafkaConsumerFactoryCustomizer azureOAuth2KafkaConsumerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> consumerProperties = kafkaProperties.buildConsumerProperties();
if (needConfigureSaslOAuth(consumerProperties)) {
AzureKafkaProperties azureKafkaConsumerProperties = buildAzureProperties(consumerProperties,
azureGlobalProperties);
updateConfigs.put(AZURE_TOKEN_CREDENTIAL, buildCredentialFromProperties(azureKafkaConsumerProperties));
updateConfigs.putAll(KAFKA_OAUTH_CONFIGS);
}
return factory -> factory.updateConfigs(updateConfigs);
} | } | DefaultKafkaConsumerFactoryCustomizer azureOAuth2KafkaConsumerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> consumerProperties = kafkaProperties.buildConsumerProperties();
configureOAuth2Properties(updateConfigs, consumerProperties);
return factory -> factory.updateConfigs(updateConfigs);
} | class AzureEventHubsKafkaOAuth2AutoConfiguration {
private final KafkaProperties kafkaProperties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private final AzureGlobalProperties azureGlobalProperties;
private final TokenCredential defaultTokenCredential;
AzureEventHubsKafkaOAuth2AutoConfiguration(KafkaProperties kafkaProperties,
AzureTokenCredentialResolver resolver,
@Qualifier(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME) TokenCredential defaultTokenCredential,
AzureGlobalProperties azureGlobalProperties) {
this.kafkaProperties = kafkaProperties;
this.tokenCredentialResolver = resolver;
this.defaultTokenCredential = defaultTokenCredential;
this.azureGlobalProperties = azureGlobalProperties;
}
@Bean
@Bean
DefaultKafkaProducerFactoryCustomizer azureOAuth2KafkaProducerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> producerProperties = kafkaProperties.buildProducerProperties();
if (needConfigureSaslOAuth(producerProperties)) {
AzureKafkaProperties azureKafkaProducerProperties = buildAzureProperties(producerProperties,
azureGlobalProperties);
updateConfigs.put(AZURE_TOKEN_CREDENTIAL, buildCredentialFromProperties(azureKafkaProducerProperties));
updateConfigs.putAll(KAFKA_OAUTH_CONFIGS);
}
return factory -> factory.updateConfigs(updateConfigs);
}
private TokenCredential buildCredentialFromProperties(AzureKafkaProperties azureKafkaConsumerProperties) {
TokenCredential tokenCredential = tokenCredentialResolver.resolve(azureKafkaConsumerProperties);
return tokenCredential == null ? defaultTokenCredential : tokenCredential;
}
@ConditionalOnClass(KafkaMessageChannelBinder.class)
@Configuration(proxyBeanMethods = false)
static class AzureKafkaSpringCloudStreamConfiguration {
@Bean
KafkaBinderConfigurationPropertiesBeanPostProcessor kafkaBinderConfigurationPropertiesBeanPostProcessor(
AzureGlobalProperties azureGlobalProperties) {
return new KafkaBinderConfigurationPropertiesBeanPostProcessor(azureGlobalProperties);
}
}
} | class AzureEventHubsKafkaOAuth2AutoConfiguration {
private final KafkaProperties kafkaProperties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private final AzureGlobalProperties azureGlobalProperties;
private final TokenCredential defaultTokenCredential;
AzureEventHubsKafkaOAuth2AutoConfiguration(KafkaProperties kafkaProperties,
AzureTokenCredentialResolver resolver,
@Qualifier(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME) TokenCredential defaultTokenCredential,
AzureGlobalProperties azureGlobalProperties) {
this.kafkaProperties = kafkaProperties;
this.tokenCredentialResolver = resolver;
this.defaultTokenCredential = defaultTokenCredential;
this.azureGlobalProperties = azureGlobalProperties;
configureKafkaUserAgent();
}
@Bean
@Bean
DefaultKafkaProducerFactoryCustomizer azureOAuth2KafkaProducerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> producerProperties = kafkaProperties.buildProducerProperties();
configureOAuth2Properties(updateConfigs, producerProperties);
return factory -> factory.updateConfigs(updateConfigs);
}
private void configureOAuth2Properties(Map<String, Object> updateConfigs, Map<String, Object> sourceKafkaProperties) {
if (needConfigureSaslOAuth(sourceKafkaProperties)) {
AzureKafkaProperties azureKafkaProperties = buildAzureProperties(sourceKafkaProperties,
azureGlobalProperties);
updateConfigs.put(AZURE_TOKEN_CREDENTIAL, resolveSpringCloudAzureTokenCredential(azureKafkaProperties));
updateConfigs.putAll(KAFKA_OAUTH_CONFIGS);
logConfigureOAuthProperties();
}
}
private TokenCredential resolveSpringCloudAzureTokenCredential(AzureKafkaProperties azureKafkaProperties) {
TokenCredential tokenCredential = tokenCredentialResolver.resolve(azureKafkaProperties);
return tokenCredential == null ? defaultTokenCredential : tokenCredential;
}
private void configureKafkaUserAgent() {
Method dataMethod = ReflectionUtils.findMethod(ApiVersionsRequest.class, "data");
if (dataMethod != null) {
ApiVersionsRequest apiVersionsRequest = new ApiVersionsRequest.Builder().build();
ApiVersionsRequestData apiVersionsRequestData = (ApiVersionsRequestData) ReflectionUtils.invokeMethod(dataMethod, apiVersionsRequest);
if (apiVersionsRequestData != null) {
apiVersionsRequestData.setClientSoftwareName(apiVersionsRequestData.clientSoftwareName()
+ "/" + AZURE_SPRING_EVENT_HUBS_KAFKA_OAUTH);
apiVersionsRequestData.setClientSoftwareVersion(VERSION);
}
}
}
} |
should we add some delimiter in between `apiVersionsRequestData.clientSoftwareName()` and `AZURE_SPRING_EVENT_HUBS_KAFKA_OAUTH`? | private void configureKafkaUserAgent() {
Method dataMethod = ReflectionUtils.findMethod(ApiVersionsRequest.class, "data");
if (dataMethod != null) {
ApiVersionsRequest apiVersionsRequest = new ApiVersionsRequest.Builder().build();
ApiVersionsRequestData apiVersionsRequestData = (ApiVersionsRequestData) ReflectionUtils.invokeMethod(dataMethod, apiVersionsRequest);
if (apiVersionsRequestData != null) {
apiVersionsRequestData.setClientSoftwareName(apiVersionsRequestData.clientSoftwareName()
+ AZURE_SPRING_EVENT_HUBS_KAFKA_OAUTH);
apiVersionsRequestData.setClientSoftwareVersion(VERSION);
}
}
} | + AZURE_SPRING_EVENT_HUBS_KAFKA_OAUTH); | private void configureKafkaUserAgent() {
Method dataMethod = ReflectionUtils.findMethod(ApiVersionsRequest.class, "data");
if (dataMethod != null) {
ApiVersionsRequest apiVersionsRequest = new ApiVersionsRequest.Builder().build();
ApiVersionsRequestData apiVersionsRequestData = (ApiVersionsRequestData) ReflectionUtils.invokeMethod(dataMethod, apiVersionsRequest);
if (apiVersionsRequestData != null) {
apiVersionsRequestData.setClientSoftwareName(apiVersionsRequestData.clientSoftwareName()
+ "/" + AZURE_SPRING_EVENT_HUBS_KAFKA_OAUTH);
apiVersionsRequestData.setClientSoftwareVersion(VERSION);
}
}
} | class AzureEventHubsKafkaOAuth2AutoConfiguration {
private final KafkaProperties kafkaProperties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private final AzureGlobalProperties azureGlobalProperties;
private final TokenCredential defaultTokenCredential;
AzureEventHubsKafkaOAuth2AutoConfiguration(KafkaProperties kafkaProperties,
AzureTokenCredentialResolver resolver,
@Qualifier(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME) TokenCredential defaultTokenCredential,
AzureGlobalProperties azureGlobalProperties) {
this.kafkaProperties = kafkaProperties;
this.tokenCredentialResolver = resolver;
this.defaultTokenCredential = defaultTokenCredential;
this.azureGlobalProperties = azureGlobalProperties;
configureKafkaUserAgent();
}
@Bean
DefaultKafkaConsumerFactoryCustomizer azureOAuth2KafkaConsumerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> consumerProperties = kafkaProperties.buildConsumerProperties();
configureOAuth2Properties(updateConfigs, consumerProperties);
return factory -> factory.updateConfigs(updateConfigs);
}
@Bean
DefaultKafkaProducerFactoryCustomizer azureOAuth2KafkaProducerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> producerProperties = kafkaProperties.buildProducerProperties();
configureOAuth2Properties(updateConfigs, producerProperties);
return factory -> factory.updateConfigs(updateConfigs);
}
private void configureOAuth2Properties(Map<String, Object> updateConfigs, Map<String, Object> sourceKafkaProperties) {
if (needConfigureSaslOAuth(sourceKafkaProperties)) {
AzureKafkaProperties azureKafkaProperties = buildAzureProperties(sourceKafkaProperties,
azureGlobalProperties);
updateConfigs.put(AZURE_TOKEN_CREDENTIAL, resolveSpringCloudAzureTokenCredential(azureKafkaProperties));
updateConfigs.putAll(KAFKA_OAUTH_CONFIGS);
logConfigureOAuthProperties();
}
}
private TokenCredential resolveSpringCloudAzureTokenCredential(AzureKafkaProperties azureKafkaConsumerProperties) {
TokenCredential tokenCredential = tokenCredentialResolver.resolve(azureKafkaConsumerProperties);
return tokenCredential == null ? defaultTokenCredential : tokenCredential;
}
@ConditionalOnClass(KafkaMessageChannelBinder.class)
@Configuration(proxyBeanMethods = false)
@Import(KafkaBinderConfiguration.class)
static class AzureKafkaSpringCloudStreamConfiguration {
@Bean
KafkaBinderConfigurationPropertiesBeanPostProcessor kafkaBinderConfigurationPropertiesBeanPostProcessor(
AzureGlobalProperties azureGlobalProperties) {
return new KafkaBinderConfigurationPropertiesBeanPostProcessor(azureGlobalProperties);
}
}
} | class AzureEventHubsKafkaOAuth2AutoConfiguration {
private final KafkaProperties kafkaProperties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private final AzureGlobalProperties azureGlobalProperties;
private final TokenCredential defaultTokenCredential;
AzureEventHubsKafkaOAuth2AutoConfiguration(KafkaProperties kafkaProperties,
AzureTokenCredentialResolver resolver,
@Qualifier(DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME) TokenCredential defaultTokenCredential,
AzureGlobalProperties azureGlobalProperties) {
this.kafkaProperties = kafkaProperties;
this.tokenCredentialResolver = resolver;
this.defaultTokenCredential = defaultTokenCredential;
this.azureGlobalProperties = azureGlobalProperties;
configureKafkaUserAgent();
}
@Bean
DefaultKafkaConsumerFactoryCustomizer azureOAuth2KafkaConsumerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> consumerProperties = kafkaProperties.buildConsumerProperties();
configureOAuth2Properties(updateConfigs, consumerProperties);
return factory -> factory.updateConfigs(updateConfigs);
}
@Bean
DefaultKafkaProducerFactoryCustomizer azureOAuth2KafkaProducerFactoryCustomizer() {
Map<String, Object> updateConfigs = new HashMap<>();
Map<String, Object> producerProperties = kafkaProperties.buildProducerProperties();
configureOAuth2Properties(updateConfigs, producerProperties);
return factory -> factory.updateConfigs(updateConfigs);
}
private void configureOAuth2Properties(Map<String, Object> updateConfigs, Map<String, Object> sourceKafkaProperties) {
if (needConfigureSaslOAuth(sourceKafkaProperties)) {
AzureKafkaProperties azureKafkaProperties = buildAzureProperties(sourceKafkaProperties,
azureGlobalProperties);
updateConfigs.put(AZURE_TOKEN_CREDENTIAL, resolveSpringCloudAzureTokenCredential(azureKafkaProperties));
updateConfigs.putAll(KAFKA_OAUTH_CONFIGS);
logConfigureOAuthProperties();
}
}
private TokenCredential resolveSpringCloudAzureTokenCredential(AzureKafkaProperties azureKafkaProperties) {
TokenCredential tokenCredential = tokenCredentialResolver.resolve(azureKafkaProperties);
return tokenCredential == null ? defaultTokenCredential : tokenCredential;
}
} |
How about this? | private OAuthBearerToken getOAuthBearerToken() {
if (accessToken == null || accessToken.isExpired()) {
TokenRequestContext request = new TokenRequestContext();
request.addScopes(tokenAudience);
request.setTenantId(properties.getProfile().getTenantId());
AccessToken accessToken = credential.getToken(request).block(Duration.ofSeconds(30));
if (accessToken != null) {
this.accessToken = new AzureOAuthBearerToken(accessToken);
}
}
return accessToken;
} | AccessToken accessToken = credential.getToken(request).block(Duration.ofSeconds(30)); | private OAuthBearerToken getOAuthBearerToken() {
if (accessToken == null || accessToken.isExpired()) {
TokenRequestContext request = new TokenRequestContext();
request.addScopes(tokenAudience);
request.setTenantId(properties.getProfile().getTenantId());
AccessToken accessToken = credential.getToken(request).block(ACCESS_TOKEN_REQUEST_BLOCK_TIME);
if (accessToken != null) {
this.accessToken = new AzureOAuthBearerToken(accessToken);
}
}
return accessToken;
} | class KafkaOAuth2AuthenticateCallbackHandler implements AuthenticateCallbackHandler {
private final AzureThirdPartyServiceProperties properties = new AzureThirdPartyServiceProperties();
private final DefaultAzureCredentialBuilderFactory defaultAzureCredentialBuilderFactory =
new DefaultAzureCredentialBuilderFactory(properties);
private TokenCredential credential;
private AzureOAuthBearerToken accessToken;
private String tokenAudience;
private final AzureTokenCredentialResolver tokenCredentialResolver = new AzureTokenCredentialResolver();
@Override
public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) {
String bootstrapServer = Arrays.asList(configs.get(BOOTSTRAP_SERVERS_CONFIG)).get(0).toString();
bootstrapServer = bootstrapServer.replaceAll("\\[|\\]", "");
URI uri = URI.create("https:
this.tokenAudience = uri.getScheme() + ":
credential = (TokenCredential) configs.get(AZURE_TOKEN_CREDENTIAL);
AzureIdentityCustomConfigUtils.convertConfigMapToAzureProperties(configs, properties);
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback) {
OAuthBearerTokenCallback oauthCallback = (OAuthBearerTokenCallback) callback;
credential = getTokenCredential();
OAuthBearerToken token = getOAuthBearerToken();
oauthCallback.token(token);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
private TokenCredential getTokenCredential() {
if (credential == null) {
credential = tokenCredentialResolver.resolve(properties);
if (credential == null) {
credential = defaultAzureCredentialBuilderFactory.build().build();
}
}
return credential;
}
@Override
public void close() {
}
} | class KafkaOAuth2AuthenticateCallbackHandler implements AuthenticateCallbackHandler {
private static final Duration ACCESS_TOKEN_REQUEST_BLOCK_TIME = Duration.ofSeconds(30);
private final AzureKafkaProperties properties;
private final AzureTokenCredentialResolver tokenCredentialResolver;
private TokenCredential credential;
private AzureOAuthBearerToken accessToken;
private String tokenAudience;
public KafkaOAuth2AuthenticateCallbackHandler() {
this(new AzureKafkaProperties(), new AzureTokenCredentialResolver());
}
public KafkaOAuth2AuthenticateCallbackHandler(AzureKafkaProperties properties, AzureTokenCredentialResolver tokenCredentialResolver) {
this.properties = properties;
this.tokenCredentialResolver = tokenCredentialResolver;
}
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) {
List<String> bootstrapServers = (List<String>) configs.get(BOOTSTRAP_SERVERS_CONFIG);
if (bootstrapServers == null || bootstrapServers.size() != 1) {
throw new IllegalArgumentException("Invalid bootstrap servers configured for Azure Event Hubs for Kafka! Must supply exactly 1 non-null bootstrap server configuration,"
+ " with the format as {YOUR.EVENTHUBS.FQDN}:9093.");
}
String bootstrapServer = bootstrapServers.get(0);
if (!bootstrapServer.endsWith(":9093")) {
throw new IllegalArgumentException("Invalid bootstrap server configured for Azure Event Hubs for Kafka! The format should be {YOUR.EVENTHUBS.FQDN}:9093.");
}
URI uri = URI.create("https:
this.tokenAudience = uri.getScheme() + ":
credential = (TokenCredential) configs.get(AZURE_TOKEN_CREDENTIAL);
AzureKafkaPropertiesUtils.convertConfigMapToAzureProperties(configs, properties);
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback) {
OAuthBearerTokenCallback oauthCallback = (OAuthBearerTokenCallback) callback;
credential = getTokenCredential();
OAuthBearerToken token = getOAuthBearerToken();
oauthCallback.token(token);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
private TokenCredential getTokenCredential() {
if (credential == null) {
credential = tokenCredentialResolver.resolve(properties);
if (credential == null) {
credential = new DefaultAzureCredentialBuilderFactory(properties).build().build();
}
}
return credential;
}
@Override
public void close() {
}
} |
```suggestion JsonReader replayReader = DefaultJsonReader.bufferObject(jsonReader); ``` | public static AnimalWithTypeIdContainingDot fromJson(JsonReader jsonReader) {
return JsonUtils.readObject(jsonReader, reader -> {
String json = JsonUtils.bufferedJsonObject(jsonReader);
JsonReader replayReader = DefaultJsonReader.fromString(json);
String discriminatorValue = null;
while (replayReader.nextToken() != JsonToken.END_OBJECT) {
if ("@odata.type".equals(replayReader.getFieldName())) {
replayReader.nextToken();
discriminatorValue = replayReader.getStringValue();
break;
}
}
if ("
return DogWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else if ("
return CatWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else if ("
return RabbitWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else {
throw new IllegalStateException("Discriminator field '@odata.type' was either missing or didn't match "
+ "one of the expected values '
+ "'
+ "'
}
});
} | JsonReader replayReader = DefaultJsonReader.fromString(json); | public static AnimalWithTypeIdContainingDot fromJson(JsonReader jsonReader) {
return JsonUtils.readObject(jsonReader, reader -> {
String discriminatorValue = null;
JsonReader readerToUse = null;
jsonReader.nextToken();
if ("@odata.type".equals(jsonReader.getFieldName())) {
jsonReader.nextToken();
discriminatorValue = jsonReader.getStringValue();
readerToUse = jsonReader;
} else {
String json = JsonUtils.bufferJsonObject(jsonReader);
JsonReader replayReader = DefaultJsonReader.fromString(json);
while (replayReader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = replayReader.getFieldName();
replayReader.nextToken();
if ("@odata.type".equals(fieldName)) {
discriminatorValue = replayReader.getStringValue();
break;
} else {
replayReader.skipChildren();
}
}
if (discriminatorValue != null) {
readerToUse = DefaultJsonReader.fromString(json);
}
}
if ("
return DogWithTypeIdContainingDot.fromJson(readerToUse);
} else if ("
return CatWithTypeIdContainingDot.fromJson(readerToUse);
} else if ("
return RabbitWithTypeIdContainingDot.fromJson(readerToUse);
} else {
throw new IllegalStateException("Discriminator field '@odata.type' was either missing or didn't match "
+ "one of the expected values '
+ "'
+ "'
}
});
} | class AnimalWithTypeIdContainingDot implements JsonSerializable<AnimalWithTypeIdContainingDot> {
/**
* Creates an instance of {@link AnimalWithTypeIdContainingDot} by reading the {@link JsonReader}.
*
* @param jsonReader The {@link JsonReader} that will be read.
* @return An instance of {@link AnimalWithTypeIdContainingDot} if the {@link JsonReader} is pointing to
* {@link AnimalWithTypeIdContainingDot} JSON content, or null if it is pointing to {@link JsonToken
* @throws IllegalStateException If the {@link JsonReader} wasn't pointing to the correct {@link JsonToken} when
* passed.
*/
} | class AnimalWithTypeIdContainingDot implements JsonSerializable<AnimalWithTypeIdContainingDot> {
/**
* Creates an instance of {@link AnimalWithTypeIdContainingDot} by reading the {@link JsonReader}.
*
* @param jsonReader The {@link JsonReader} that will be read.
* @return An instance of {@link AnimalWithTypeIdContainingDot} if the {@link JsonReader} is pointing to
* {@link AnimalWithTypeIdContainingDot} JSON content, or null if it is pointing to {@link JsonToken
* @throws IllegalStateException If the {@link JsonReader} wasn't pointing to the correct {@link JsonToken} when
* passed.
*/
} |
assuming the discriminator is the first, can we avoid buffering? It can be done later, for now it would be great to understand how the current API would evolve to allow it. | public static AnimalWithTypeIdContainingDot fromJson(JsonReader jsonReader) {
return JsonUtils.readObject(jsonReader, reader -> {
String json = JsonUtils.bufferedJsonObject(jsonReader);
JsonReader replayReader = DefaultJsonReader.fromString(json);
String discriminatorValue = null;
while (replayReader.nextToken() != JsonToken.END_OBJECT) {
if ("@odata.type".equals(replayReader.getFieldName())) {
replayReader.nextToken();
discriminatorValue = replayReader.getStringValue();
break;
}
}
if ("
return DogWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else if ("
return CatWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else if ("
return RabbitWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else {
throw new IllegalStateException("Discriminator field '@odata.type' was either missing or didn't match "
+ "one of the expected values '
+ "'
+ "'
}
});
} | String json = JsonUtils.bufferedJsonObject(jsonReader); | public static AnimalWithTypeIdContainingDot fromJson(JsonReader jsonReader) {
return JsonUtils.readObject(jsonReader, reader -> {
String discriminatorValue = null;
JsonReader readerToUse = null;
jsonReader.nextToken();
if ("@odata.type".equals(jsonReader.getFieldName())) {
jsonReader.nextToken();
discriminatorValue = jsonReader.getStringValue();
readerToUse = jsonReader;
} else {
String json = JsonUtils.bufferJsonObject(jsonReader);
JsonReader replayReader = DefaultJsonReader.fromString(json);
while (replayReader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = replayReader.getFieldName();
replayReader.nextToken();
if ("@odata.type".equals(fieldName)) {
discriminatorValue = replayReader.getStringValue();
break;
} else {
replayReader.skipChildren();
}
}
if (discriminatorValue != null) {
readerToUse = DefaultJsonReader.fromString(json);
}
}
if ("
return DogWithTypeIdContainingDot.fromJson(readerToUse);
} else if ("
return CatWithTypeIdContainingDot.fromJson(readerToUse);
} else if ("
return RabbitWithTypeIdContainingDot.fromJson(readerToUse);
} else {
throw new IllegalStateException("Discriminator field '@odata.type' was either missing or didn't match "
+ "one of the expected values '
+ "'
+ "'
}
});
} | class AnimalWithTypeIdContainingDot implements JsonSerializable<AnimalWithTypeIdContainingDot> {
/**
* Creates an instance of {@link AnimalWithTypeIdContainingDot} by reading the {@link JsonReader}.
*
* @param jsonReader The {@link JsonReader} that will be read.
* @return An instance of {@link AnimalWithTypeIdContainingDot} if the {@link JsonReader} is pointing to
* {@link AnimalWithTypeIdContainingDot} JSON content, or null if it is pointing to {@link JsonToken
* @throws IllegalStateException If the {@link JsonReader} wasn't pointing to the correct {@link JsonToken} when
* passed.
*/
} | class AnimalWithTypeIdContainingDot implements JsonSerializable<AnimalWithTypeIdContainingDot> {
/**
* Creates an instance of {@link AnimalWithTypeIdContainingDot} by reading the {@link JsonReader}.
*
* @param jsonReader The {@link JsonReader} that will be read.
* @return An instance of {@link AnimalWithTypeIdContainingDot} if the {@link JsonReader} is pointing to
* {@link AnimalWithTypeIdContainingDot} JSON content, or null if it is pointing to {@link JsonToken
* @throws IllegalStateException If the {@link JsonReader} wasn't pointing to the correct {@link JsonToken} when
* passed.
*/
} |
Yes, an optimization can be done if the first property is the discriminator. Maybe this is a case where the first value can be peeked | public static AnimalWithTypeIdContainingDot fromJson(JsonReader jsonReader) {
return JsonUtils.readObject(jsonReader, reader -> {
String json = JsonUtils.bufferedJsonObject(jsonReader);
JsonReader replayReader = DefaultJsonReader.fromString(json);
String discriminatorValue = null;
while (replayReader.nextToken() != JsonToken.END_OBJECT) {
if ("@odata.type".equals(replayReader.getFieldName())) {
replayReader.nextToken();
discriminatorValue = replayReader.getStringValue();
break;
}
}
if ("
return DogWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else if ("
return CatWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else if ("
return RabbitWithTypeIdContainingDot.fromJson(DefaultJsonReader.fromString(json));
} else {
throw new IllegalStateException("Discriminator field '@odata.type' was either missing or didn't match "
+ "one of the expected values '
+ "'
+ "'
}
});
} | String json = JsonUtils.bufferedJsonObject(jsonReader); | public static AnimalWithTypeIdContainingDot fromJson(JsonReader jsonReader) {
return JsonUtils.readObject(jsonReader, reader -> {
String discriminatorValue = null;
JsonReader readerToUse = null;
jsonReader.nextToken();
if ("@odata.type".equals(jsonReader.getFieldName())) {
jsonReader.nextToken();
discriminatorValue = jsonReader.getStringValue();
readerToUse = jsonReader;
} else {
String json = JsonUtils.bufferJsonObject(jsonReader);
JsonReader replayReader = DefaultJsonReader.fromString(json);
while (replayReader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = replayReader.getFieldName();
replayReader.nextToken();
if ("@odata.type".equals(fieldName)) {
discriminatorValue = replayReader.getStringValue();
break;
} else {
replayReader.skipChildren();
}
}
if (discriminatorValue != null) {
readerToUse = DefaultJsonReader.fromString(json);
}
}
if ("
return DogWithTypeIdContainingDot.fromJson(readerToUse);
} else if ("
return CatWithTypeIdContainingDot.fromJson(readerToUse);
} else if ("
return RabbitWithTypeIdContainingDot.fromJson(readerToUse);
} else {
throw new IllegalStateException("Discriminator field '@odata.type' was either missing or didn't match "
+ "one of the expected values '
+ "'
+ "'
}
});
} | class AnimalWithTypeIdContainingDot implements JsonSerializable<AnimalWithTypeIdContainingDot> {
/**
* Creates an instance of {@link AnimalWithTypeIdContainingDot} by reading the {@link JsonReader}.
*
* @param jsonReader The {@link JsonReader} that will be read.
* @return An instance of {@link AnimalWithTypeIdContainingDot} if the {@link JsonReader} is pointing to
* {@link AnimalWithTypeIdContainingDot} JSON content, or null if it is pointing to {@link JsonToken
* @throws IllegalStateException If the {@link JsonReader} wasn't pointing to the correct {@link JsonToken} when
* passed.
*/
} | class AnimalWithTypeIdContainingDot implements JsonSerializable<AnimalWithTypeIdContainingDot> {
/**
* Creates an instance of {@link AnimalWithTypeIdContainingDot} by reading the {@link JsonReader}.
*
* @param jsonReader The {@link JsonReader} that will be read.
* @return An instance of {@link AnimalWithTypeIdContainingDot} if the {@link JsonReader} is pointing to
* {@link AnimalWithTypeIdContainingDot} JSON content, or null if it is pointing to {@link JsonToken
* @throws IllegalStateException If the {@link JsonReader} wasn't pointing to the correct {@link JsonToken} when
* passed.
*/
} |
what if somebody applies this function to root and root is `1` or `"test string"` ? - root being primitive is valid json. | public static String bufferJsonObject(JsonReader jsonReader) {
if (jsonReader.currentToken() == JsonToken.NULL) {
return null;
} else if (jsonReader.isStartArrayOrObject()) {
return jsonReader.readChildren();
} else if (jsonReader.currentToken() == JsonToken.FIELD_NAME) {
StringBuilder json = new StringBuilder("{");
JsonToken token = jsonReader.currentToken();
boolean needsComa = false;
while (token != JsonToken.END_OBJECT) {
if (needsComa) {
json.append(",");
}
if (token == JsonToken.FIELD_NAME) {
json.append("\"").append(jsonReader.getFieldName()).append("\":");
needsComa = false;
} else {
if (token == JsonToken.STRING) {
json.append("\"").append(jsonReader.getStringValue()).append("\"");
} else if (jsonReader.isStartArrayOrObject()) {
jsonReader.readChildren(json);
} else {
json.append(jsonReader.getTextValue());
}
needsComa = true;
}
token = jsonReader.nextToken();
}
return json.toString();
} else {
throw new IllegalStateException("Cannot buffer a JSON object from a non-array, non-object, non-field name "
+ "starting location. Starting location: " + jsonReader.currentToken());
}
}
private JsonUtils() {
}
} | + "starting location. Starting location: " + jsonReader.currentToken()); | public static String bufferJsonObject(JsonReader jsonReader) {
if (jsonReader.currentToken() == JsonToken.NULL) {
return null;
} else if (jsonReader.isStartArrayOrObject()) {
return jsonReader.readChildren();
} else if (jsonReader.currentToken() == JsonToken.FIELD_NAME) {
StringBuilder json = new StringBuilder("{");
JsonToken token = jsonReader.currentToken();
boolean needsComa = false;
while (token != JsonToken.END_OBJECT) {
if (needsComa) {
json.append(",");
}
if (token == JsonToken.FIELD_NAME) {
json.append("\"").append(jsonReader.getFieldName()).append("\":");
needsComa = false;
} else {
if (token == JsonToken.STRING) {
json.append("\"").append(jsonReader.getStringValue()).append("\"");
} else if (jsonReader.isStartArrayOrObject()) {
jsonReader.readChildren(json);
} else {
json.append(jsonReader.getTextValue());
}
needsComa = true;
}
token = jsonReader.nextToken();
}
return json.toString();
} else {
throw new IllegalStateException("Cannot buffer a JSON object from a non-array, non-object, non-field name "
+ "starting location. Starting location: " + jsonReader.currentToken());
}
}
private JsonUtils() {
}
} | class JsonUtils {
/**
* Serializes an array.
* <p>
* Handles three scenarios for the array:
*
* <ul>
* <li>null {@code array} writes JSON null</li>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object.
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, T[] array,
BiConsumer<JsonWriter, T> elementWriterFunc) {
if (array == null) {
return jsonWriter.writeNullField(fieldName).flush();
}
jsonWriter.writeStartArray(fieldName);
for (T element : array) {
elementWriterFunc.accept(jsonWriter, element);
}
return jsonWriter.writeEndArray().flush();
}
/**
* Serializes an array.
* <p>
* Handles three scenarios for the array:
*
* <ul>
* <li>null {@code array} writes JSON null</li>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object.
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, Iterable<T> array,
BiConsumer<JsonWriter, T> elementWriterFunc) {
if (array == null) {
return jsonWriter.writeNullField(fieldName).flush();
}
jsonWriter.writeStartArray(fieldName);
for (T element : array) {
elementWriterFunc.accept(jsonWriter, element);
}
return jsonWriter.writeEndArray().flush();
}
/**
* Serializes a map.
*
* @param jsonWriter The {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the map.
* @param map The map.
* @param entryWriterFunc Function that writes the map entry value.
* @param <T> Type of map value.
* @return The updated {@link JsonWriter} object.
*/
public static <T> JsonWriter writeMap(JsonWriter jsonWriter, String fieldName, Map<String, T> map,
BiConsumer<JsonWriter, T> entryWriterFunc) {
if (map == null) {
return jsonWriter.writeNullField(fieldName).flush();
}
jsonWriter.writeStartObject(fieldName);
for (Map.Entry<String, T> entry : map.entrySet()) {
jsonWriter.writeFieldName(entry.getKey());
entryWriterFunc.accept(jsonWriter, entry.getValue());
}
return jsonWriter.writeEndObject();
}
/**
* Handles basic logic for deserializing an object before passing it into the deserialization function.
* <p>
* This will initialize the {@link JsonReader} for object reading and then check if the current token is
* {@link JsonToken
* {@link JsonToken
* starting location to support partial object reads.
* <p>
* Use {@link
*
* @param jsonReader The {@link JsonReader} being read.
* @param deserializationFunc The function that handles deserialization logic, passing the reader and current
* token.
* @param <T> The type of object that is being deserialized.
* @return The deserialized object, or null if the {@link JsonToken
* @throws IllegalStateException If the initial token for reading isn't {@link JsonToken
*/
public static <T> T readObject(JsonReader jsonReader, Function<JsonReader, T> deserializationFunc) {
JsonToken currentToken = jsonReader.currentToken();
if (currentToken == null) {
currentToken = jsonReader.nextToken();
}
if (currentToken == JsonToken.NULL) {
return null;
} else if (currentToken == JsonToken.END_OBJECT || currentToken == JsonToken.FIELD_NAME) {
throw new IllegalStateException("Unexpected token to begin deserialization: " + jsonReader.currentToken());
}
return deserializationFunc.apply(jsonReader);
}
/**
* Handles basic logic for deserializing an array before passing it into the deserialization function.
* <p>
* This will initialize the {@link JsonReader} for array reading and then check if the current token is
* {@link JsonToken
* {@link IllegalStateException}.
* <p>
* Use {@link
*
* @param jsonReader The {@link JsonReader} being read.
* @param deserializationFunc The function that handles deserialization logic.
* @param <T> The type of array element that is being deserialized.
* @return The deserialized array, or null if the {@link JsonToken
* @throws IllegalStateException If the initial token for reading isn't {@link JsonToken
*/
public static <T> List<T> readArray(JsonReader jsonReader, Function<JsonReader, T> deserializationFunc) {
if (jsonReader.currentToken() == null) {
jsonReader.nextToken();
}
if (jsonReader.currentToken() == JsonToken.NULL) {
return null;
} else if (jsonReader.currentToken() != JsonToken.START_ARRAY) {
throw new IllegalStateException("Unexpected token to begin deserialization: " + jsonReader.currentToken());
}
List<T> array = new ArrayList<>();
while (jsonReader.nextToken() != JsonToken.END_ARRAY) {
array.add(deserializationFunc.apply(jsonReader));
}
return array;
}
/**
* Reads the {@link JsonReader} as an untyped object.
* <p>
* The returned object is one of the following:
*
* <ul>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* </ul>
*
* If the {@link JsonReader
* {@link JsonToken
* with the ending of an array or object or with the name of a field.
*
* @param jsonReader The {@link JsonReader} that will be read into an untyped object.
* @return The untyped object based on the description.
* @throws IllegalStateException If the {@link JsonReader
* {@link JsonToken
*/
public static Object readUntypedField(JsonReader jsonReader) {
return readUntypedField(jsonReader, 0);
}
private static Object readUntypedField(JsonReader jsonReader, int depth) {
if (depth >= 1000) {
throw new IllegalStateException("Untyped object exceeded allowed object nested depth of 1000.");
}
JsonToken token = jsonReader.currentToken();
if (token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT || token == JsonToken.FIELD_NAME) {
throw new IllegalStateException("Unexpected token to begin an untyped field: " + token);
}
if (token == JsonToken.NULL) {
return null;
} else if (token == JsonToken.BOOLEAN) {
return jsonReader.getBooleanValue();
} else if (token == JsonToken.NUMBER) {
String numberText = jsonReader.getTextValue();
if (numberText.contains(".")) {
return Double.parseDouble(numberText);
} else {
return Long.parseLong(numberText);
}
} else if (token == JsonToken.STRING) {
return jsonReader.getStringValue();
} else if (token == JsonToken.START_ARRAY) {
List<Object> array = new ArrayList<>();
while (jsonReader.nextToken() != JsonToken.END_ARRAY) {
array.add(readUntypedField(jsonReader, depth + 1));
}
return array;
} else if (token == JsonToken.START_OBJECT) {
Map<String, Object> object = new LinkedHashMap<>();
while (jsonReader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = jsonReader.getFieldName();
jsonReader.nextToken();
Object value = readUntypedField(jsonReader, depth + 1);
object.put(fieldName, value);
}
return object;
}
throw new IllegalStateException("Unknown token type while reading an untyped field: " + token);
}
/**
* Writes the {@code value} as an untyped field to the {@link JsonWriter}.
*
* @param jsonWriter The {@link JsonWriter} that will be written.
* @param value The value to write.
* @return The updated {@code jsonWriter} with the {@code value} written to it.
*/
public static JsonWriter writeUntypedField(JsonWriter jsonWriter, Object value) {
if (value == null) {
return jsonWriter.writeNull().flush();
} else if (value instanceof Short) {
return jsonWriter.writeInt((short) value).flush();
} else if (value instanceof Integer) {
return jsonWriter.writeInt((int) value).flush();
} else if (value instanceof Long) {
return jsonWriter.writeLong((long) value).flush();
} else if (value instanceof Float) {
return jsonWriter.writeFloat((float) value).flush();
} else if (value instanceof Double) {
return jsonWriter.writeDouble((double) value).flush();
} else if (value instanceof Boolean) {
return jsonWriter.writeBoolean((boolean) value).flush();
} else if (value instanceof byte[]) {
return jsonWriter.writeBinary((byte[]) value).flush();
} else if (value instanceof CharSequence) {
return jsonWriter.writeString(String.valueOf(value)).flush();
} else if (value instanceof JsonSerializable<?>) {
return ((JsonSerializable<?>) value).toJson(jsonWriter).flush();
} else if (value.getClass() == Object.class) {
return jsonWriter.writeStartObject().writeEndObject().flush();
} else {
return jsonWriter.writeString(String.valueOf(value)).flush();
}
}
/**
* Gets the nullable JSON property as null if the {@link JsonReader JsonReader's} {@link JsonReader
* is {@link JsonToken
*
* @param jsonReader The {@link JsonReader} being read.
* @param nonNullGetter The non-null getter.
* @param <T> The type of the property.
* @return Either null if the current token is {@link JsonToken
* {@code nonNullGetter}.
*/
public static <T> T getNullableProperty(JsonReader jsonReader, Function<JsonReader, T> nonNullGetter) {
return jsonReader.currentToken() == JsonToken.NULL ? null : nonNullGetter.apply(jsonReader);
}
/**
* Reads and returns the current JSON object the {@link JsonReader} is pointing to. This will mutate the current
* location of {@code jsonReader}.
* <p>
* If the {@code jsonReader} is pointing to {@link JsonToken
* JSON object will be read until completion and returned as a raw JSON string.
*
* @param jsonReader The {@link JsonReader} being read.
* @return The buffered JSON object the {@link JsonReader} was pointing to, or null if it was pointing to
* {@link JsonToken
* @throws IllegalStateException If the {@code jsonReader}'s {@link JsonReader
* one of {@link JsonToken
* {@link JsonToken
*/ | class JsonUtils {
/**
* Serializes an array.
* <p>
* Handles two scenarios for the array:
*
* <ul>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* If a null array should be written as JSON null use
* {@link
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object, or a no-op if {@code array} is null
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, T[] array,
BiConsumer<JsonWriter, T> elementWriterFunc) {
return writeArray(jsonWriter, fieldName, array, false, elementWriterFunc);
}
/**
* Serializes an array.
* <p>
* Handles three scenarios for the array:
*
* <ul>
* <li>null {@code array} writes JSON null, iff {@code writeNull} is true</li>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param writeNull Whether JSON null should be written if {@code array} is null.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object, or a no-op if {@code array} is null and {@code writeNull} is
* false.
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, T[] array, boolean writeNull,
BiConsumer<JsonWriter, T> elementWriterFunc) {
if (array == null) {
return writeNull ? jsonWriter.writeNullField(fieldName).flush() : jsonWriter;
}
jsonWriter.writeStartArray(fieldName);
for (T element : array) {
elementWriterFunc.accept(jsonWriter, element);
}
return jsonWriter.writeEndArray().flush();
}
/**
* Serializes an array.
* <p>
* Handles two scenarios for the array:
*
* <ul>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* If a null array should be written as JSON null use
* {@link
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object, or a no-op if {@code array} is null
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, Iterable<T> array,
BiConsumer<JsonWriter, T> elementWriterFunc) {
return writeArray(jsonWriter, fieldName, array, false, elementWriterFunc);
}
/**
* Serializes an array.
* <p>
* Handles three scenarios for the array:
*
* <ul>
* <li>null {@code array} writes JSON null, iff {@code writeNull} is true</li>
* <li>empty {@code array} writes {@code []}</li>
* <li>non-empty {@code array} writes a populated JSON array</li>
* </ul>
*
* @param jsonWriter {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the array.
* @param array The array.
* @param writeNull Whether JSON null should be written if {@code array} is null.
* @param elementWriterFunc Function that writes the array element.
* @param <T> Type of array element.
* @return The updated {@link JsonWriter} object, or a no-op if {@code array} is null and {@code writeNull} is
* false.
*/
public static <T> JsonWriter writeArray(JsonWriter jsonWriter, String fieldName, Iterable<T> array,
boolean writeNull, BiConsumer<JsonWriter, T> elementWriterFunc) {
if (array == null) {
return writeNull ? jsonWriter.writeNullField(fieldName).flush() : jsonWriter;
}
jsonWriter.writeStartArray(fieldName);
for (T element : array) {
elementWriterFunc.accept(jsonWriter, element);
}
return jsonWriter.writeEndArray().flush();
}
/**
* Serializes a map.
* <p>
* If the map is null this method is a no-op. Use {@link
* and passed true for {@code writeNull} if JSON null should be written.
*
* @param jsonWriter The {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the map.
* @param map The map.
* @param entryWriterFunc Function that writes the map entry value.
* @param <T> Type of map value.
* @return The updated {@link JsonWriter} object, or a no-op if {@code map} is null
*/
public static <T> JsonWriter writeMap(JsonWriter jsonWriter, String fieldName, Map<String, T> map,
BiConsumer<JsonWriter, T> entryWriterFunc) {
return writeMap(jsonWriter, fieldName, map, false, entryWriterFunc);
}
/**
* Serializes a map.
* <p>
* If {@code map} is null and {@code writeNull} is false this method is effectively a no-op.
*
* @param jsonWriter The {@link JsonWriter} where JSON will be written.
* @param fieldName Field name for the map.
* @param map The map.
* @param writeNull Whether JSON null should be written if {@code map} is null.
* @param entryWriterFunc Function that writes the map entry value.
* @param <T> Type of map value.
* @return The updated {@link JsonWriter} object, or a no-op if {@code map} is null and {@code writeNull} is false.
*/
public static <T> JsonWriter writeMap(JsonWriter jsonWriter, String fieldName, Map<String, T> map,
boolean writeNull, BiConsumer<JsonWriter, T> entryWriterFunc) {
if (map == null) {
return writeNull ? jsonWriter.writeNullField(fieldName).flush() : jsonWriter;
}
jsonWriter.writeStartObject(fieldName);
for (Map.Entry<String, T> entry : map.entrySet()) {
jsonWriter.writeFieldName(entry.getKey());
entryWriterFunc.accept(jsonWriter, entry.getValue());
}
return jsonWriter.writeEndObject();
}
/**
* Handles basic logic for deserializing an object before passing it into the deserialization function.
* <p>
* This will initialize the {@link JsonReader} for object reading and then check if the current token is
* {@link JsonToken
* {@link JsonToken
* starting location to support partial object reads.
* <p>
* Use {@link
*
* @param jsonReader The {@link JsonReader} being read.
* @param deserializationFunc The function that handles deserialization logic, passing the reader and current
* token.
* @param <T> The type of object that is being deserialized.
* @return The deserialized object, or null if the {@link JsonToken
* @throws IllegalStateException If the initial token for reading isn't {@link JsonToken
*/
public static <T> T readObject(JsonReader jsonReader, Function<JsonReader, T> deserializationFunc) {
JsonToken currentToken = jsonReader.currentToken();
if (currentToken == null) {
currentToken = jsonReader.nextToken();
}
if (currentToken == JsonToken.NULL) {
return null;
} else if (currentToken == JsonToken.END_OBJECT || currentToken == JsonToken.FIELD_NAME) {
throw new IllegalStateException("Unexpected token to begin deserialization: " + jsonReader.currentToken());
}
return deserializationFunc.apply(jsonReader);
}
/**
* Handles basic logic for deserializing an array before passing it into the deserialization function.
* <p>
* This will initialize the {@link JsonReader} for array reading and then check if the current token is
* {@link JsonToken
* {@link IllegalStateException}.
* <p>
* Use {@link
*
* @param jsonReader The {@link JsonReader} being read.
* @param deserializationFunc The function that handles deserialization logic.
* @param <T> The type of array element that is being deserialized.
* @return The deserialized array, or null if the {@link JsonToken
* @throws IllegalStateException If the initial token for reading isn't {@link JsonToken
*/
public static <T> List<T> readArray(JsonReader jsonReader, Function<JsonReader, T> deserializationFunc) {
if (jsonReader.currentToken() == null) {
jsonReader.nextToken();
}
if (jsonReader.currentToken() == JsonToken.NULL) {
return null;
} else if (jsonReader.currentToken() != JsonToken.START_ARRAY) {
throw new IllegalStateException("Unexpected token to begin deserialization: " + jsonReader.currentToken());
}
List<T> array = new ArrayList<>();
while (jsonReader.nextToken() != JsonToken.END_ARRAY) {
array.add(deserializationFunc.apply(jsonReader));
}
return array;
}
/**
* Reads the {@link JsonReader} as an untyped object.
* <p>
* The returned object is one of the following:
*
* <ul>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* <li></li>
* </ul>
*
* If the {@link JsonReader
* {@link JsonToken
* with the ending of an array or object or with the name of a field.
*
* @param jsonReader The {@link JsonReader} that will be read into an untyped object.
* @return The untyped object based on the description.
* @throws IllegalStateException If the {@link JsonReader
* {@link JsonToken
*/
public static Object readUntypedField(JsonReader jsonReader) {
return readUntypedField(jsonReader, 0);
}
/**
 * Recursively reads the {@link JsonReader JsonReader's} current token as an untyped value.
 * <p>
 * JSON null, boolean, number, string, array, and object tokens map to null, {@code Boolean},
 * {@code Long}/{@code Double}, {@code String}, {@code List<Object>}, and {@code Map<String, Object>}
 * respectively.
 *
 * @param jsonReader The {@link JsonReader} being read.
 * @param depth The current nesting depth, used to bound recursion.
 * @return The untyped value for the current token.
 * @throws IllegalStateException If nesting exceeds 1000 levels, if reading begins at an end-of-array,
 * end-of-object, or field-name token, or if the token type is unknown.
 */
private static Object readUntypedField(JsonReader jsonReader, int depth) {
    // Guard against stack overflow from deeply nested (or maliciously crafted) payloads.
    if (depth >= 1000) {
        throw new IllegalStateException("Untyped object exceeded allowed object nested depth of 1000.");
    }

    JsonToken token = jsonReader.currentToken();
    if (token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT || token == JsonToken.FIELD_NAME) {
        throw new IllegalStateException("Unexpected token to begin an untyped field: " + token);
    }

    if (token == JsonToken.NULL) {
        return null;
    } else if (token == JsonToken.BOOLEAN) {
        return jsonReader.getBooleanValue();
    } else if (token == JsonToken.NUMBER) {
        return parseNumber(jsonReader.getTextValue());
    } else if (token == JsonToken.STRING) {
        return jsonReader.getStringValue();
    } else if (token == JsonToken.START_ARRAY) {
        List<Object> array = new ArrayList<>();
        while (jsonReader.nextToken() != JsonToken.END_ARRAY) {
            array.add(readUntypedField(jsonReader, depth + 1));
        }
        return array;
    } else if (token == JsonToken.START_OBJECT) {
        // LinkedHashMap preserves the field order of the source JSON.
        Map<String, Object> object = new LinkedHashMap<>();
        while (jsonReader.nextToken() != JsonToken.END_OBJECT) {
            String fieldName = jsonReader.getFieldName();
            jsonReader.nextToken();
            object.put(fieldName, readUntypedField(jsonReader, depth + 1));
        }
        return object;
    }

    throw new IllegalStateException("Unknown token type while reading an untyped field: " + token);
}

/**
 * Parses raw JSON number text into a {@code Long} or {@code Double}.
 * <p>
 * RFC 8259 numbers may carry a fraction ({@code .}) or an exponent ({@code e}/{@code E}); either
 * forces floating-point parsing. The previous dot-only check threw {@link NumberFormatException}
 * on valid values such as {@code 1e5}.
 *
 * @param numberText The raw JSON number text.
 * @return A {@code Long} for in-range integral values, otherwise a {@code Double}.
 */
private static Object parseNumber(String numberText) {
    if (numberText.indexOf('.') >= 0 || numberText.indexOf('e') >= 0 || numberText.indexOf('E') >= 0) {
        return Double.parseDouble(numberText);
    }

    try {
        return Long.parseLong(numberText);
    } catch (NumberFormatException ignored) {
        // Integral value outside the range of long; fall back to the closest double.
        return Double.parseDouble(numberText);
    }
}
/**
 * Writes the {@code value} as an untyped field to the {@link JsonWriter}.
 * <p>
 * The JSON representation is chosen from the runtime type of {@code value}: JSON null for null, JSON
 * numbers for boxed numeric types, a JSON boolean for {@code Boolean}, binary for {@code byte[]},
 * JSON strings for character-, date-, and duration-like types, the self-serialized form for
 * {@link JsonSerializable} values, an empty JSON object for a plain {@code Object}, and
 * {@code String.valueOf(value)} written as a JSON string for anything else.
 *
 * @param jsonWriter The {@link JsonWriter} that will be written.
 * @param value The value to write.
 * @return The updated {@code jsonWriter} with the {@code value} written to it.
 */
public static JsonWriter writeUntypedField(JsonWriter jsonWriter, Object value) {
    // NOTE: the instanceof order matters - CharSequence is checked after the numeric and binary
    // types so those keep their specialized representations. Every branch flushes before returning
    // - presumably so the value is committed to the underlying sink immediately; TODO confirm
    // per-value flushing is intentional.
    if (value == null) {
        return jsonWriter.writeNull().flush();
    } else if (value instanceof Short) {
        // Shorts are widened and written with the int writer.
        return jsonWriter.writeInt((short) value).flush();
    } else if (value instanceof Integer) {
        return jsonWriter.writeInt((int) value).flush();
    } else if (value instanceof Long) {
        return jsonWriter.writeLong((long) value).flush();
    } else if (value instanceof Float) {
        return jsonWriter.writeFloat((float) value).flush();
    } else if (value instanceof Double) {
        return jsonWriter.writeDouble((double) value).flush();
    } else if (value instanceof Boolean) {
        return jsonWriter.writeBoolean((boolean) value).flush();
    } else if (value instanceof byte[]) {
        return jsonWriter.writeBinary((byte[]) value).flush();
    } else if (value instanceof CharSequence) {
        // Covers String, StringBuilder, and other character sequences.
        return jsonWriter.writeString(String.valueOf(value)).flush();
    } else if (value instanceof Character) {
        // Character isn't a CharSequence, so it is converted to a single-character string.
        return jsonWriter.writeString(String.valueOf(((Character) value).charValue())).flush();
    } else if (value instanceof DateTimeRfc1123) {
        return jsonWriter.writeString(value.toString()).flush();
    } else if (value instanceof OffsetDateTime) {
        return jsonWriter.writeString(value.toString()).flush();
    } else if (value instanceof LocalDate) {
        return jsonWriter.writeString(value.toString()).flush();
    } else if (value instanceof Duration) {
        return jsonWriter.writeString(value.toString()).flush();
    } else if (value instanceof JsonSerializable<?>) {
        // Types that know how to serialize themselves write their own JSON structure.
        return ((JsonSerializable<?>) value).toJson(jsonWriter).flush();
    } else if (value.getClass() == Object.class) {
        // A bare Object carries no state, so it is represented as an empty JSON object.
        return jsonWriter.writeStartObject().writeEndObject().flush();
    } else {
        // Unknown types fall back to their string representation.
        return jsonWriter.writeString(String.valueOf(value)).flush();
    }
}
/**
 * Gets a nullable JSON property, returning null when the {@link JsonReader JsonReader's} current token
 * is JSON null and otherwise delegating to the supplied getter.
 *
 * @param jsonReader The {@link JsonReader} being read.
 * @param nonNullGetter The getter invoked when the current token isn't JSON null.
 * @param <T> The type of the property.
 * @return Null if the current token is JSON null, otherwise the value produced by the
 * {@code nonNullGetter}.
 */
public static <T> T getNullableProperty(JsonReader jsonReader, Function<JsonReader, T> nonNullGetter) {
    // Guard clause instead of a ternary: JSON null short-circuits to a Java null.
    if (jsonReader.currentToken() == JsonToken.NULL) {
        return null;
    }
    return nonNullGetter.apply(jsonReader);
}
/**
* Reads and returns the current JSON object the {@link JsonReader} is pointing to. This will mutate the current
* location of {@code jsonReader}.
* <p>
* If the {@code jsonReader} is pointing to {@link JsonToken
* JSON object will be read until completion and returned as a raw JSON string.
*
* @param jsonReader The {@link JsonReader} being read.
* @return The buffered JSON object the {@link JsonReader} was pointing to, or null if it was pointing to
* {@link JsonToken
* @throws IllegalStateException If the {@code jsonReader}'s {@link JsonReader
* one of {@link JsonToken
* {@link JsonToken
*/ |
Should this be based on the type of input stream? If the source is ByteArrayInputStream, this should return true, for e.g. | public boolean isReplayable() {
return false;
} | return false; | public boolean isReplayable() {
return false;
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private final InputStream content;
private final AtomicReference<byte[]> bytes = new AtomicReference<>();
/**
* Creates an instance of {@link InputStreamContent}.
*
* @param inputStream The inputStream that is used as the content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public InputStreamContent(InputStream inputStream) {
this.content = Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
}
@Override
public Long getLength() {
if (bytes.get() != null) {
return (long) bytes.get().length;
}
return null;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
byte[] data = this.bytes.get();
if (data == null) {
bytes.set(getBytes());
data = this.bytes.get();
}
return data;
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return this.content;
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return FluxUtil.toFluxByteBuffer(this.content, STREAM_READ_SIZE);
}
@Override
private byte[] getBytes() {
try {
ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[STREAM_READ_SIZE];
while ((nRead = this.content.read(data, 0, data.length)) != -1) {
dataOutputBuffer.write(data, 0, nRead);
}
return dataOutputBuffer.toByteArray();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private final InputStream content;
private final AtomicReference<byte[]> bytes = new AtomicReference<>();
/**
* Creates an instance of {@link InputStreamContent}.
*
* @param inputStream The inputStream that is used as the content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public InputStreamContent(InputStream inputStream) {
this.content = Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
}
@Override
public Long getLength() {
if (bytes.get() != null) {
return (long) bytes.get().length;
}
return null;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
byte[] data = this.bytes.get();
if (data == null) {
bytes.set(getBytes());
data = this.bytes.get();
}
return data;
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return this.content;
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return FluxUtil.toFluxByteBuffer(this.content, STREAM_READ_SIZE);
}
@Override
private byte[] getBytes() {
try {
ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[STREAM_READ_SIZE];
while ((nRead = this.content.read(data, 0, data.length)) != -1) {
dataOutputBuffer.write(data, 0, nRead);
}
return dataOutputBuffer.toByteArray();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
} |
That's going to be part of https://github.com/Azure/azure-sdk-for-java/issues/28799 . In current form even `ByteArrayInputStream` fails the test - second call to `consume(data.toStream)` fails because stream is at eof. | public boolean isReplayable() {
return false;
} | return false; | public boolean isReplayable() {
return false;
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private final InputStream content;
private final AtomicReference<byte[]> bytes = new AtomicReference<>();
/**
* Creates an instance of {@link InputStreamContent}.
*
* @param inputStream The inputStream that is used as the content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public InputStreamContent(InputStream inputStream) {
this.content = Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
}
@Override
public Long getLength() {
if (bytes.get() != null) {
return (long) bytes.get().length;
}
return null;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
byte[] data = this.bytes.get();
if (data == null) {
bytes.set(getBytes());
data = this.bytes.get();
}
return data;
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return this.content;
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return FluxUtil.toFluxByteBuffer(this.content, STREAM_READ_SIZE);
}
@Override
private byte[] getBytes() {
try {
ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[STREAM_READ_SIZE];
while ((nRead = this.content.read(data, 0, data.length)) != -1) {
dataOutputBuffer.write(data, 0, nRead);
}
return dataOutputBuffer.toByteArray();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private final InputStream content;
private final AtomicReference<byte[]> bytes = new AtomicReference<>();
/**
* Creates an instance of {@link InputStreamContent}.
*
* @param inputStream The inputStream that is used as the content for this instance.
* @throws NullPointerException if {@code content} is null.
*/
public InputStreamContent(InputStream inputStream) {
this.content = Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
}
@Override
public Long getLength() {
if (bytes.get() != null) {
return (long) bytes.get().length;
}
return null;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
byte[] data = this.bytes.get();
if (data == null) {
bytes.set(getBytes());
data = this.bytes.get();
}
return data;
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return this.content;
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
return FluxUtil.toFluxByteBuffer(this.content, STREAM_READ_SIZE);
}
@Override
private byte[] getBytes() {
try {
ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[STREAM_READ_SIZE];
while ((nRead = this.content.read(data, 0, data.length)) != -1) {
dataOutputBuffer.write(data, 0, nRead);
}
return dataOutputBuffer.toByteArray();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
} |
should this get into respective `SyncProxy`/`AsyncProxy` ? I.e. I'd expect only one `isReactive` switch in this class and respective implementations carry on. | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
try {
final SwaggerMethodParser methodParser = getMethodParser(method);
HttpRequest request;
boolean isReactive = isReactive(methodParser.getReturnType());
if (isReactive) {
request = asyncRestProxy.createHttpRequest(methodParser, args);
} else {
request = syncRestProxy.createHttpRequest(methodParser, args);
}
Context context = methodParser.setContext(args);
RequestOptions options = methodParser.setRequestOptions(args);
context = RestProxyUtils.mergeRequestOptionsContext(context, options);
context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
.addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(e));
}
} | } | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | class RestProxy implements InvocationHandler {
private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class);
private final SwaggerInterfaceParser interfaceParser;
private AsyncRestProxy asyncRestProxy;
private SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
AccessibleByteArrayOutputStream exposes internall array, we should create BinaryData from array not from stream here. I.e. this should be same as line 310. | public void updateRequest(RequestDataConfiguration requestDataConfiguration, SerializerAdapter serializerAdapter) throws IOException {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (isJson) {
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.JSON, stream);
request.setHeader("Content-Length", String.valueOf(stream.size()));
request.setBody(BinaryData.fromStream(new ByteArrayInputStream(stream.toByteArray(), 0, stream.size())));
} else if (bodyContentObject instanceof byte[]) {
request.setBody((byte[]) bodyContentObject);
} else if (bodyContentObject instanceof String) {
final String bodyContentString = (String) bodyContentObject;
if (!bodyContentString.isEmpty()) {
request.setBody(bodyContentString);
}
} else if (bodyContentObject instanceof ByteBuffer) {
request.setBody(((ByteBuffer) bodyContentObject).array());
} else {
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream);
request.setHeader("Content-Length", String.valueOf(stream.size()));
request.setBody(stream.toByteArray());
}
} | request.setBody(BinaryData.fromStream(new ByteArrayInputStream(stream.toByteArray(), 0, stream.size()))); | public void updateRequest(RequestDataConfiguration requestDataConfiguration, SerializerAdapter serializerAdapter) throws IOException {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (isJson) {
byte[] serializedBytes = serializerAdapter.serializeToBytes(bodyContentObject, SerializerEncoding.JSON);
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.JSON, stream);
request.setHeader("Content-Length", String.valueOf(serializedBytes.length));
request.setBody(BinaryData.fromBytes(serializedBytes));
} else if (bodyContentObject instanceof byte[]) {
request.setBody((byte[]) bodyContentObject);
} else if (bodyContentObject instanceof String) {
final String bodyContentString = (String) bodyContentObject;
if (!bodyContentString.isEmpty()) {
request.setBody(bodyContentString);
}
} else if (bodyContentObject instanceof ByteBuffer) {
if (((ByteBuffer) bodyContentObject).hasArray()) {
request.setBody(((ByteBuffer) bodyContentObject).array());
} else {
byte[] array = new byte[((ByteBuffer) bodyContentObject).remaining()];
((ByteBuffer) bodyContentObject).get(array);
request.setBody(array);
}
} else {
byte[] serializedBytes = serializerAdapter
.serializeToBytes(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()));
request.setHeader("Content-Length", String.valueOf(serializedBytes.length));
request.setBody(serializedBytes);
}
} | class SyncRestProxy extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
*/
public SyncRestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public HttpResponse send(HttpRequest request, Context contextData) {
return httpPipeline.sendSync(request, contextData);
}
public HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
return createHttpRequestBase(methodParser, serializer, false, args);
}
@Override
public Object invoke(Object proxy, Method method, RequestOptions options, EnumSet<ErrorOptions> errorOptions, Consumer<HttpRequest> requestCallback, SwaggerMethodParser methodParser, HttpRequest request, Context context) {
HttpResponseDecoder.HttpDecodedResponse decodedResponse = null;
Throwable throwable = null;
try {
context = startTracingSpan(method, context);
if (options != null && requestCallback != null) {
requestCallback.accept(request);
}
if (request.getBodyAsBinaryData() != null) {
request.setBody(RestProxyUtils.validateLengthSync(request));
}
final HttpResponse response = send(request, context);
decodedResponse = this.decoder.decodeSync(response, methodParser);
return handleRestReturnType(decodedResponse, methodParser, methodParser.getReturnType(), context, options, errorOptions);
} catch (RuntimeException e) {
throwable = e;
throw LOGGER.logExceptionAsError(e);
} finally {
if (decodedResponse != null || throwable != null) {
endTracingSpan(decodedResponse, throwable, context);
}
}
}
/**
* Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing
* additional context information.
*
* @param method Service method being called.
* @param context Context information about the current service call.
* @return The updated context containing the span context.
*/
private Context startTracingSpan(Method method, Context context) {
if (!TracerProxy.isTracingEnabled()) {
return context;
}
if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) {
return context;
}
String spanName = interfaceParser.getServiceName() + "." + method.getName();
context = TracerProxy.setSpanName(spanName, context);
return TracerProxy.start(spanName, context);
}
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.
*
* @param decodedResponse The HttpResponse to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
* @return An async-version of the provided decodedResponse.
*/
private HttpResponseDecoder.HttpDecodedResponse ensureExpectedStatus(final HttpResponseDecoder.HttpDecodedResponse decodedResponse,
final SwaggerMethodParser methodParser, RequestOptions options, EnumSet<ErrorOptions> errorOptions) {
final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)
|| (options != null && errorOptions.contains(ErrorOptions.NO_THROW))) {
return decodedResponse;
}
Exception e;
BinaryData responseData = decodedResponse.getSourceResponse().getBodyAsBinaryData();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), null, null);
} else {
Object decodedBody = decodedResponse.getDecodedBodySync(responseBytes);
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), responseBytes, decodedBody);
}
if (e instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) e);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private Object handleRestResponseReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser,
final Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
if (entityType.equals(StreamResponse.class)) {
return createResponse(response, entityType, null);
}
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
response.getSourceResponse().close();
return createResponse(response, entityType, null);
} else {
Object bodyAsObject = handleBodyReturnType(response, methodParser, bodyType);
Response<?> httpResponse = createResponse(response, entityType, bodyAsObject);
if (httpResponse == null) {
return createResponse(response, entityType, null);
}
return httpResponse;
}
} else {
return handleBodyReturnType(response, methodParser, entityType);
}
}
private Object handleBodyReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser, final Type entityType) {
final int responseStatusCode = response.getSourceResponse().getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(
entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
boolean isSuccess = (responseStatusCode / 100) == 2;
result = isSuccess;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
byte[] responseBodyBytes = response.getSourceResponse().getBodyAsBinaryData().toBytes();
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = response.getSourceResponse().getBodyAsBinaryData();
} else {
result = response.getDecodedBodySync((byte[]) null);
}
return result;
}
/**
* Handle the provided asynchronous HTTP response and return the deserialized value.
*
* @param httpDecodedResponse the asynchronous HTTP response to the original HTTP request
* @param methodParser the SwaggerMethodParser that the request originates from
* @param returnType the type of value that will be returned
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return the deserialized result
*/
private Object handleRestReturnType(final HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse,
final SwaggerMethodParser methodParser,
final Type returnType,
final Context context,
final RequestOptions options,
EnumSet<ErrorOptions> errorOptions) {
final HttpResponseDecoder.HttpDecodedResponse expectedResponse =
ensureExpectedStatus(httpDecodedResponse, methodParser, options, errorOptions);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType,
Void.class)) {
result = expectedResponse;
} else {
result = handleRestResponseReturnType(httpDecodedResponse, methodParser, returnType);
}
return result;
}
private static void endTracingSpan(HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse, Throwable throwable, Context tracingContext) {
if (tracingContext == null) {
return;
}
Object disableTracingValue = (tracingContext.getData(Tracer.DISABLE_TRACING_KEY).isPresent()
? tracingContext.getData(Tracer.DISABLE_TRACING_KEY).get() : null);
boolean disableTracing = Boolean.TRUE.equals(disableTracingValue != null ? disableTracingValue : false);
if (disableTracing) {
return;
}
int statusCode = 0;
if (httpDecodedResponse != null) {
statusCode = httpDecodedResponse.getSourceResponse().getStatusCode();
} else if (throwable != null) {
if (throwable instanceof HttpResponseException) {
HttpResponseException exception = (HttpResponseException) throwable;
statusCode = exception.getResponse().getStatusCode();
}
}
TracerProxy.end(statusCode, throwable, tracingContext);
}
} | class SyncRestProxy extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
*/
public SyncRestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
HttpResponse send(HttpRequest request, Context contextData) {
return httpPipeline.sendSync(request, contextData);
}
public HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
return createHttpRequest(methodParser, serializer, false, args);
}
@Override
public Object invoke(Object proxy, Method method, RequestOptions options, EnumSet<ErrorOptions> errorOptions, Consumer<HttpRequest> requestCallback, SwaggerMethodParser methodParser, HttpRequest request, Context context) {
HttpResponseDecoder.HttpDecodedResponse decodedResponse = null;
Throwable throwable = null;
try {
context = startTracingSpan(method, context);
if (options != null && requestCallback != null) {
requestCallback.accept(request);
}
if (request.getBodyAsBinaryData() != null) {
request.setBody(RestProxyUtils.validateLengthSync(request));
}
final HttpResponse response = send(request, context);
decodedResponse = this.decoder.decodeSync(response, methodParser);
return handleRestReturnType(decodedResponse, methodParser, methodParser.getReturnType(), context, options, errorOptions);
} catch (RuntimeException e) {
throwable = e;
throw LOGGER.logExceptionAsError(e);
} finally {
if (decodedResponse != null || throwable != null) {
endTracingSpan(decodedResponse, throwable, context);
}
}
}
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.
*
* @param decodedResponse The HttpResponse to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
* @return An async-version of the provided decodedResponse.
*/
private HttpResponseDecoder.HttpDecodedResponse ensureExpectedStatus(final HttpResponseDecoder.HttpDecodedResponse decodedResponse,
final SwaggerMethodParser methodParser, RequestOptions options, EnumSet<ErrorOptions> errorOptions) {
final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)
|| (options != null && errorOptions.contains(ErrorOptions.NO_THROW))) {
return decodedResponse;
}
Exception e;
BinaryData responseData = decodedResponse.getSourceResponse().getBodyAsBinaryData();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), null, null);
} else {
Object decodedBody = decodedResponse.getDecodedBodySync(responseBytes);
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), responseBytes, decodedBody);
}
if (e instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) e);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private Object handleRestResponseReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser,
final Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
if (entityType.equals(StreamResponse.class)) {
return createResponse(response, entityType, null);
}
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
response.getSourceResponse().close();
return createResponse(response, entityType, null);
} else {
Object bodyAsObject = handleBodyReturnType(response, methodParser, bodyType);
Response<?> httpResponse = createResponse(response, entityType, bodyAsObject);
if (httpResponse == null) {
return createResponse(response, entityType, null);
}
return httpResponse;
}
} else {
return handleBodyReturnType(response, methodParser, entityType);
}
}
private Object handleBodyReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser, final Type entityType) {
final int responseStatusCode = response.getSourceResponse().getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(
entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
boolean isSuccess = (responseStatusCode / 100) == 2;
result = isSuccess;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
BinaryData binaryData = response.getSourceResponse().getBodyAsBinaryData();
byte[] responseBodyBytes = binaryData != null ? binaryData.toBytes() : null;
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes != null ? (responseBodyBytes.length == 0 ? null : responseBodyBytes) : null;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = response.getSourceResponse().getBodyAsBinaryData();
} else {
result = response.getDecodedBodySync((byte[]) null);
}
return result;
}
/**
* Handle the provided asynchronous HTTP response and return the deserialized value.
*
* @param httpDecodedResponse the asynchronous HTTP response to the original HTTP request
* @param methodParser the SwaggerMethodParser that the request originates from
* @param returnType the type of value that will be returned
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return the deserialized result
*/
private Object handleRestReturnType(final HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse,
final SwaggerMethodParser methodParser,
final Type returnType,
final Context context,
final RequestOptions options,
EnumSet<ErrorOptions> errorOptions) {
final HttpResponseDecoder.HttpDecodedResponse expectedResponse =
ensureExpectedStatus(httpDecodedResponse, methodParser, options, errorOptions);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType,
Void.class)) {
result = expectedResponse;
} else {
result = handleRestResponseReturnType(httpDecodedResponse, methodParser, returnType);
}
return result;
}
} |
not every buffer has array. We should if buffer.hasArray() { array } else { allocate array read to array. } We may also consider creating ByteBufferContent and add BinaryData.fromByteBuffer (as a backlog item). | public void updateRequest(RequestDataConfiguration requestDataConfiguration, SerializerAdapter serializerAdapter) throws IOException {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (isJson) {
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.JSON, stream);
request.setHeader("Content-Length", String.valueOf(stream.size()));
request.setBody(BinaryData.fromStream(new ByteArrayInputStream(stream.toByteArray(), 0, stream.size())));
} else if (bodyContentObject instanceof byte[]) {
request.setBody((byte[]) bodyContentObject);
} else if (bodyContentObject instanceof String) {
final String bodyContentString = (String) bodyContentObject;
if (!bodyContentString.isEmpty()) {
request.setBody(bodyContentString);
}
} else if (bodyContentObject instanceof ByteBuffer) {
request.setBody(((ByteBuffer) bodyContentObject).array());
} else {
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream);
request.setHeader("Content-Length", String.valueOf(stream.size()));
request.setBody(stream.toByteArray());
}
} | request.setBody(((ByteBuffer) bodyContentObject).array()); | public void updateRequest(RequestDataConfiguration requestDataConfiguration, SerializerAdapter serializerAdapter) throws IOException {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (isJson) {
byte[] serializedBytes = serializerAdapter.serializeToBytes(bodyContentObject, SerializerEncoding.JSON);
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.JSON, stream);
request.setHeader("Content-Length", String.valueOf(serializedBytes.length));
request.setBody(BinaryData.fromBytes(serializedBytes));
} else if (bodyContentObject instanceof byte[]) {
request.setBody((byte[]) bodyContentObject);
} else if (bodyContentObject instanceof String) {
final String bodyContentString = (String) bodyContentObject;
if (!bodyContentString.isEmpty()) {
request.setBody(bodyContentString);
}
} else if (bodyContentObject instanceof ByteBuffer) {
if (((ByteBuffer) bodyContentObject).hasArray()) {
request.setBody(((ByteBuffer) bodyContentObject).array());
} else {
byte[] array = new byte[((ByteBuffer) bodyContentObject).remaining()];
((ByteBuffer) bodyContentObject).get(array);
request.setBody(array);
}
} else {
byte[] serializedBytes = serializerAdapter
.serializeToBytes(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()));
request.setHeader("Content-Length", String.valueOf(serializedBytes.length));
request.setBody(serializedBytes);
}
} | class SyncRestProxy extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
*/
public SyncRestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public HttpResponse send(HttpRequest request, Context contextData) {
return httpPipeline.sendSync(request, contextData);
}
public HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
return createHttpRequestBase(methodParser, serializer, false, args);
}
@Override
public Object invoke(Object proxy, Method method, RequestOptions options, EnumSet<ErrorOptions> errorOptions, Consumer<HttpRequest> requestCallback, SwaggerMethodParser methodParser, HttpRequest request, Context context) {
HttpResponseDecoder.HttpDecodedResponse decodedResponse = null;
Throwable throwable = null;
try {
context = startTracingSpan(method, context);
if (options != null && requestCallback != null) {
requestCallback.accept(request);
}
if (request.getBodyAsBinaryData() != null) {
request.setBody(RestProxyUtils.validateLengthSync(request));
}
final HttpResponse response = send(request, context);
decodedResponse = this.decoder.decodeSync(response, methodParser);
return handleRestReturnType(decodedResponse, methodParser, methodParser.getReturnType(), context, options, errorOptions);
} catch (RuntimeException e) {
throwable = e;
throw LOGGER.logExceptionAsError(e);
} finally {
if (decodedResponse != null || throwable != null) {
endTracingSpan(decodedResponse, throwable, context);
}
}
}
/**
* Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing
* additional context information.
*
* @param method Service method being called.
* @param context Context information about the current service call.
* @return The updated context containing the span context.
*/
private Context startTracingSpan(Method method, Context context) {
if (!TracerProxy.isTracingEnabled()) {
return context;
}
if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) {
return context;
}
String spanName = interfaceParser.getServiceName() + "." + method.getName();
context = TracerProxy.setSpanName(spanName, context);
return TracerProxy.start(spanName, context);
}
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.
*
* @param decodedResponse The HttpResponse to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
* @return An async-version of the provided decodedResponse.
*/
private HttpResponseDecoder.HttpDecodedResponse ensureExpectedStatus(final HttpResponseDecoder.HttpDecodedResponse decodedResponse,
final SwaggerMethodParser methodParser, RequestOptions options, EnumSet<ErrorOptions> errorOptions) {
final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)
|| (options != null && errorOptions.contains(ErrorOptions.NO_THROW))) {
return decodedResponse;
}
Exception e;
BinaryData responseData = decodedResponse.getSourceResponse().getBodyAsBinaryData();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), null, null);
} else {
Object decodedBody = decodedResponse.getDecodedBodySync(responseBytes);
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), responseBytes, decodedBody);
}
if (e instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) e);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private Object handleRestResponseReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser,
final Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
if (entityType.equals(StreamResponse.class)) {
return createResponse(response, entityType, null);
}
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
response.getSourceResponse().close();
return createResponse(response, entityType, null);
} else {
Object bodyAsObject = handleBodyReturnType(response, methodParser, bodyType);
Response<?> httpResponse = createResponse(response, entityType, bodyAsObject);
if (httpResponse == null) {
return createResponse(response, entityType, null);
}
return httpResponse;
}
} else {
return handleBodyReturnType(response, methodParser, entityType);
}
}
private Object handleBodyReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser, final Type entityType) {
final int responseStatusCode = response.getSourceResponse().getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(
entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
boolean isSuccess = (responseStatusCode / 100) == 2;
result = isSuccess;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
byte[] responseBodyBytes = response.getSourceResponse().getBodyAsBinaryData().toBytes();
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = response.getSourceResponse().getBodyAsBinaryData();
} else {
result = response.getDecodedBodySync((byte[]) null);
}
return result;
}
/**
* Handle the provided asynchronous HTTP response and return the deserialized value.
*
* @param httpDecodedResponse the asynchronous HTTP response to the original HTTP request
* @param methodParser the SwaggerMethodParser that the request originates from
* @param returnType the type of value that will be returned
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return the deserialized result
*/
private Object handleRestReturnType(final HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse,
final SwaggerMethodParser methodParser,
final Type returnType,
final Context context,
final RequestOptions options,
EnumSet<ErrorOptions> errorOptions) {
final HttpResponseDecoder.HttpDecodedResponse expectedResponse =
ensureExpectedStatus(httpDecodedResponse, methodParser, options, errorOptions);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType,
Void.class)) {
result = expectedResponse;
} else {
result = handleRestResponseReturnType(httpDecodedResponse, methodParser, returnType);
}
return result;
}
private static void endTracingSpan(HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse, Throwable throwable, Context tracingContext) {
if (tracingContext == null) {
return;
}
Object disableTracingValue = (tracingContext.getData(Tracer.DISABLE_TRACING_KEY).isPresent()
? tracingContext.getData(Tracer.DISABLE_TRACING_KEY).get() : null);
boolean disableTracing = Boolean.TRUE.equals(disableTracingValue != null ? disableTracingValue : false);
if (disableTracing) {
return;
}
int statusCode = 0;
if (httpDecodedResponse != null) {
statusCode = httpDecodedResponse.getSourceResponse().getStatusCode();
} else if (throwable != null) {
if (throwable instanceof HttpResponseException) {
HttpResponseException exception = (HttpResponseException) throwable;
statusCode = exception.getResponse().getStatusCode();
}
}
TracerProxy.end(statusCode, throwable, tracingContext);
}
} | class SyncRestProxy extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
*/
public SyncRestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
HttpResponse send(HttpRequest request, Context contextData) {
return httpPipeline.sendSync(request, contextData);
}
public HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
return createHttpRequest(methodParser, serializer, false, args);
}
@Override
public Object invoke(Object proxy, Method method, RequestOptions options, EnumSet<ErrorOptions> errorOptions, Consumer<HttpRequest> requestCallback, SwaggerMethodParser methodParser, HttpRequest request, Context context) {
HttpResponseDecoder.HttpDecodedResponse decodedResponse = null;
Throwable throwable = null;
try {
context = startTracingSpan(method, context);
if (options != null && requestCallback != null) {
requestCallback.accept(request);
}
if (request.getBodyAsBinaryData() != null) {
request.setBody(RestProxyUtils.validateLengthSync(request));
}
final HttpResponse response = send(request, context);
decodedResponse = this.decoder.decodeSync(response, methodParser);
return handleRestReturnType(decodedResponse, methodParser, methodParser.getReturnType(), context, options, errorOptions);
} catch (RuntimeException e) {
throwable = e;
throw LOGGER.logExceptionAsError(e);
} finally {
if (decodedResponse != null || throwable != null) {
endTracingSpan(decodedResponse, throwable, context);
}
}
}
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.
*
* @param decodedResponse The HttpResponse to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
* @return An async-version of the provided decodedResponse.
*/
private HttpResponseDecoder.HttpDecodedResponse ensureExpectedStatus(final HttpResponseDecoder.HttpDecodedResponse decodedResponse,
final SwaggerMethodParser methodParser, RequestOptions options, EnumSet<ErrorOptions> errorOptions) {
final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)
|| (options != null && errorOptions.contains(ErrorOptions.NO_THROW))) {
return decodedResponse;
}
Exception e;
BinaryData responseData = decodedResponse.getSourceResponse().getBodyAsBinaryData();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), null, null);
} else {
Object decodedBody = decodedResponse.getDecodedBodySync(responseBytes);
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), responseBytes, decodedBody);
}
if (e instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) e);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private Object handleRestResponseReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser,
final Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
if (entityType.equals(StreamResponse.class)) {
return createResponse(response, entityType, null);
}
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
response.getSourceResponse().close();
return createResponse(response, entityType, null);
} else {
Object bodyAsObject = handleBodyReturnType(response, methodParser, bodyType);
Response<?> httpResponse = createResponse(response, entityType, bodyAsObject);
if (httpResponse == null) {
return createResponse(response, entityType, null);
}
return httpResponse;
}
} else {
return handleBodyReturnType(response, methodParser, entityType);
}
}
private Object handleBodyReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser, final Type entityType) {
final int responseStatusCode = response.getSourceResponse().getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(
entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
boolean isSuccess = (responseStatusCode / 100) == 2;
result = isSuccess;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
BinaryData binaryData = response.getSourceResponse().getBodyAsBinaryData();
byte[] responseBodyBytes = binaryData != null ? binaryData.toBytes() : null;
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes != null ? (responseBodyBytes.length == 0 ? null : responseBodyBytes) : null;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = response.getSourceResponse().getBodyAsBinaryData();
} else {
result = response.getDecodedBodySync((byte[]) null);
}
return result;
}
/**
* Handle the provided asynchronous HTTP response and return the deserialized value.
*
* @param httpDecodedResponse the asynchronous HTTP response to the original HTTP request
* @param methodParser the SwaggerMethodParser that the request originates from
* @param returnType the type of value that will be returned
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return the deserialized result
*/
private Object handleRestReturnType(final HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse,
final SwaggerMethodParser methodParser,
final Type returnType,
final Context context,
final RequestOptions options,
EnumSet<ErrorOptions> errorOptions) {
final HttpResponseDecoder.HttpDecodedResponse expectedResponse =
ensureExpectedStatus(httpDecodedResponse, methodParser, options, errorOptions);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType,
Void.class)) {
result = expectedResponse;
} else {
result = handleRestResponseReturnType(httpDecodedResponse, methodParser, returnType);
}
return result;
}
} |
we might not need both at the same time ? | private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
} | this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser); | private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
} | class RestProxy implements InvocationHandler {
private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class);
private final SwaggerInterfaceParser interfaceParser;
private AsyncRestProxy asyncRestProxy;
private SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
try {
final SwaggerMethodParser methodParser = getMethodParser(method);
HttpRequest request;
boolean isReactive = isReactive(methodParser.getReturnType());
if (isReactive) {
request = asyncRestProxy.createHttpRequest(methodParser, args);
} else {
request = syncRestProxy.createHttpRequest(methodParser, args);
}
Context context = methodParser.setContext(args);
RequestOptions options = methodParser.setRequestOptions(args);
context = RestProxyUtils.mergeRequestOptionsContext(context, options);
context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
.addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(e));
}
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
Do you think we can move this to RestProxyBase and have that navigate/forward it to the appropriate class/method Sync/AsyncRestProxy ? | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
try {
final SwaggerMethodParser methodParser = getMethodParser(method);
HttpRequest request;
boolean isReactive = isReactive(methodParser.getReturnType());
if (isReactive) {
request = asyncRestProxy.createHttpRequest(methodParser, args);
} else {
request = syncRestProxy.createHttpRequest(methodParser, args);
}
Context context = methodParser.setContext(args);
RequestOptions options = methodParser.setRequestOptions(args);
context = RestProxyUtils.mergeRequestOptionsContext(context, options);
context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
.addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(e));
}
} | boolean isReactive = isReactive(methodParser.getReturnType()); | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | class RestProxy implements InvocationHandler {
private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class);
private final SwaggerInterfaceParser interfaceParser;
private AsyncRestProxy asyncRestProxy;
private SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
can we have `methodParser.isReactive()` ? | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
try {
final SwaggerMethodParser methodParser = getMethodParser(method);
HttpRequest request;
boolean isReactive = isReactive(methodParser.getReturnType());
if (isReactive) {
request = asyncRestProxy.createHttpRequest(methodParser, args);
} else {
request = syncRestProxy.createHttpRequest(methodParser, args);
}
Context context = methodParser.setContext(args);
RequestOptions options = methodParser.setRequestOptions(args);
context = RestProxyUtils.mergeRequestOptionsContext(context, options);
context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
.addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(e));
}
} | boolean isReactive = isReactive(methodParser.getReturnType()); | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | class RestProxy implements InvocationHandler {
private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class);
private final SwaggerInterfaceParser interfaceParser;
private AsyncRestProxy asyncRestProxy;
private SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
I'd lean towards this solution as the Proxy API should never change during runtime so it's able to be pre-computed | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
try {
final SwaggerMethodParser methodParser = getMethodParser(method);
HttpRequest request;
boolean isReactive = isReactive(methodParser.getReturnType());
if (isReactive) {
request = asyncRestProxy.createHttpRequest(methodParser, args);
} else {
request = syncRestProxy.createHttpRequest(methodParser, args);
}
Context context = methodParser.setContext(args);
RequestOptions options = methodParser.setRequestOptions(args);
context = RestProxyUtils.mergeRequestOptionsContext(context, options);
context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
.addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(e));
}
} | boolean isReactive = isReactive(methodParser.getReturnType()); | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | class RestProxy implements InvocationHandler {
private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class);
private final SwaggerInterfaceParser interfaceParser;
private AsyncRestProxy asyncRestProxy;
private SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
I'd remove this try/catch and make it part of the sync and async RestProxy implementations as for sync we'll want to throw but for async we'll want to return Mono.error | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
try {
final SwaggerMethodParser methodParser = getMethodParser(method);
HttpRequest request;
boolean isReactive = isReactive(methodParser.getReturnType());
if (isReactive) {
request = asyncRestProxy.createHttpRequest(methodParser, args);
} else {
request = syncRestProxy.createHttpRequest(methodParser, args);
}
Context context = methodParser.setContext(args);
RequestOptions options = methodParser.setRequestOptions(args);
context = RestProxyUtils.mergeRequestOptionsContext(context, options);
context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
.addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, request, context);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(e));
}
} | } catch (IOException e) { | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | class RestProxy implements InvocationHandler {
private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class);
private final SwaggerInterfaceParser interfaceParser;
private AsyncRestProxy asyncRestProxy;
private SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
This is subtly wrong, `AccessibleByteArrayOutputStream.toByteArray` will return the direct buffer but the direct buffer may be larger than the contents that have been written to it. This should be ```java request.setBody(ByteBuffer.wrap(stream.toByteArray(), 0, stream.count())); ``` | public void updateRequest(RequestDataConfiguration requestDataConfiguration, SerializerAdapter serializerAdapter) throws IOException {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (isJson) {
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.JSON, stream);
request.setHeader("Content-Length", String.valueOf(stream.size()));
request.setBody(BinaryData.fromStream(new ByteArrayInputStream(stream.toByteArray(), 0, stream.size())));
} else if (bodyContentObject instanceof byte[]) {
request.setBody((byte[]) bodyContentObject);
} else if (bodyContentObject instanceof String) {
final String bodyContentString = (String) bodyContentObject;
if (!bodyContentString.isEmpty()) {
request.setBody(bodyContentString);
}
} else if (bodyContentObject instanceof ByteBuffer) {
request.setBody(((ByteBuffer) bodyContentObject).array());
} else {
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream);
request.setHeader("Content-Length", String.valueOf(stream.size()));
request.setBody(stream.toByteArray());
}
} | request.setBody(stream.toByteArray()); | public void updateRequest(RequestDataConfiguration requestDataConfiguration, SerializerAdapter serializerAdapter) throws IOException {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (isJson) {
byte[] serializedBytes = serializerAdapter.serializeToBytes(bodyContentObject, SerializerEncoding.JSON);
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.JSON, stream);
request.setHeader("Content-Length", String.valueOf(serializedBytes.length));
request.setBody(BinaryData.fromBytes(serializedBytes));
} else if (bodyContentObject instanceof byte[]) {
request.setBody((byte[]) bodyContentObject);
} else if (bodyContentObject instanceof String) {
final String bodyContentString = (String) bodyContentObject;
if (!bodyContentString.isEmpty()) {
request.setBody(bodyContentString);
}
} else if (bodyContentObject instanceof ByteBuffer) {
if (((ByteBuffer) bodyContentObject).hasArray()) {
request.setBody(((ByteBuffer) bodyContentObject).array());
} else {
byte[] array = new byte[((ByteBuffer) bodyContentObject).remaining()];
((ByteBuffer) bodyContentObject).get(array);
request.setBody(array);
}
} else {
byte[] serializedBytes = serializerAdapter
.serializeToBytes(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()));
request.setHeader("Content-Length", String.valueOf(serializedBytes.length));
request.setBody(serializedBytes);
}
} | class SyncRestProxy extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
*/
public SyncRestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public HttpResponse send(HttpRequest request, Context contextData) {
return httpPipeline.sendSync(request, contextData);
}
public HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
return createHttpRequestBase(methodParser, serializer, false, args);
}
@Override
public Object invoke(Object proxy, Method method, RequestOptions options, EnumSet<ErrorOptions> errorOptions, Consumer<HttpRequest> requestCallback, SwaggerMethodParser methodParser, HttpRequest request, Context context) {
HttpResponseDecoder.HttpDecodedResponse decodedResponse = null;
Throwable throwable = null;
try {
context = startTracingSpan(method, context);
if (options != null && requestCallback != null) {
requestCallback.accept(request);
}
if (request.getBodyAsBinaryData() != null) {
request.setBody(RestProxyUtils.validateLengthSync(request));
}
final HttpResponse response = send(request, context);
decodedResponse = this.decoder.decodeSync(response, methodParser);
return handleRestReturnType(decodedResponse, methodParser, methodParser.getReturnType(), context, options, errorOptions);
} catch (RuntimeException e) {
throwable = e;
throw LOGGER.logExceptionAsError(e);
} finally {
if (decodedResponse != null || throwable != null) {
endTracingSpan(decodedResponse, throwable, context);
}
}
}
/**
* Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing
* additional context information.
*
* @param method Service method being called.
* @param context Context information about the current service call.
* @return The updated context containing the span context.
*/
private Context startTracingSpan(Method method, Context context) {
if (!TracerProxy.isTracingEnabled()) {
return context;
}
if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) {
return context;
}
String spanName = interfaceParser.getServiceName() + "." + method.getName();
context = TracerProxy.setSpanName(spanName, context);
return TracerProxy.start(spanName, context);
}
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.
*
* @param decodedResponse The HttpResponse to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
* @return An async-version of the provided decodedResponse.
*/
private HttpResponseDecoder.HttpDecodedResponse ensureExpectedStatus(final HttpResponseDecoder.HttpDecodedResponse decodedResponse,
final SwaggerMethodParser methodParser, RequestOptions options, EnumSet<ErrorOptions> errorOptions) {
final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)
|| (options != null && errorOptions.contains(ErrorOptions.NO_THROW))) {
return decodedResponse;
}
Exception e;
BinaryData responseData = decodedResponse.getSourceResponse().getBodyAsBinaryData();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), null, null);
} else {
Object decodedBody = decodedResponse.getDecodedBodySync(responseBytes);
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), responseBytes, decodedBody);
}
if (e instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) e);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private Object handleRestResponseReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser,
final Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
if (entityType.equals(StreamResponse.class)) {
return createResponse(response, entityType, null);
}
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
response.getSourceResponse().close();
return createResponse(response, entityType, null);
} else {
Object bodyAsObject = handleBodyReturnType(response, methodParser, bodyType);
Response<?> httpResponse = createResponse(response, entityType, bodyAsObject);
if (httpResponse == null) {
return createResponse(response, entityType, null);
}
return httpResponse;
}
} else {
return handleBodyReturnType(response, methodParser, entityType);
}
}
private Object handleBodyReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser, final Type entityType) {
final int responseStatusCode = response.getSourceResponse().getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(
entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
boolean isSuccess = (responseStatusCode / 100) == 2;
result = isSuccess;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
byte[] responseBodyBytes = response.getSourceResponse().getBodyAsBinaryData().toBytes();
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = response.getSourceResponse().getBodyAsBinaryData();
} else {
result = response.getDecodedBodySync((byte[]) null);
}
return result;
}
/**
* Handle the provided asynchronous HTTP response and return the deserialized value.
*
* @param httpDecodedResponse the asynchronous HTTP response to the original HTTP request
* @param methodParser the SwaggerMethodParser that the request originates from
* @param returnType the type of value that will be returned
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return the deserialized result
*/
private Object handleRestReturnType(final HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse,
final SwaggerMethodParser methodParser,
final Type returnType,
final Context context,
final RequestOptions options,
EnumSet<ErrorOptions> errorOptions) {
final HttpResponseDecoder.HttpDecodedResponse expectedResponse =
ensureExpectedStatus(httpDecodedResponse, methodParser, options, errorOptions);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType,
Void.class)) {
result = expectedResponse;
} else {
result = handleRestResponseReturnType(httpDecodedResponse, methodParser, returnType);
}
return result;
}
private static void endTracingSpan(HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse, Throwable throwable, Context tracingContext) {
if (tracingContext == null) {
return;
}
Object disableTracingValue = (tracingContext.getData(Tracer.DISABLE_TRACING_KEY).isPresent()
? tracingContext.getData(Tracer.DISABLE_TRACING_KEY).get() : null);
boolean disableTracing = Boolean.TRUE.equals(disableTracingValue != null ? disableTracingValue : false);
if (disableTracing) {
return;
}
int statusCode = 0;
if (httpDecodedResponse != null) {
statusCode = httpDecodedResponse.getSourceResponse().getStatusCode();
} else if (throwable != null) {
if (throwable instanceof HttpResponseException) {
HttpResponseException exception = (HttpResponseException) throwable;
statusCode = exception.getResponse().getStatusCode();
}
}
TracerProxy.end(statusCode, throwable, tracingContext);
}
} | class SyncRestProxy extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
*/
public SyncRestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
HttpResponse send(HttpRequest request, Context contextData) {
return httpPipeline.sendSync(request, contextData);
}
public HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
return createHttpRequest(methodParser, serializer, false, args);
}
@Override
public Object invoke(Object proxy, Method method, RequestOptions options, EnumSet<ErrorOptions> errorOptions, Consumer<HttpRequest> requestCallback, SwaggerMethodParser methodParser, HttpRequest request, Context context) {
HttpResponseDecoder.HttpDecodedResponse decodedResponse = null;
Throwable throwable = null;
try {
context = startTracingSpan(method, context);
if (options != null && requestCallback != null) {
requestCallback.accept(request);
}
if (request.getBodyAsBinaryData() != null) {
request.setBody(RestProxyUtils.validateLengthSync(request));
}
final HttpResponse response = send(request, context);
decodedResponse = this.decoder.decodeSync(response, methodParser);
return handleRestReturnType(decodedResponse, methodParser, methodParser.getReturnType(), context, options, errorOptions);
} catch (RuntimeException e) {
throwable = e;
throw LOGGER.logExceptionAsError(e);
} finally {
if (decodedResponse != null || throwable != null) {
endTracingSpan(decodedResponse, throwable, context);
}
}
}
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.
*
* @param decodedResponse The HttpResponse to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
* @return An async-version of the provided decodedResponse.
*/
private HttpResponseDecoder.HttpDecodedResponse ensureExpectedStatus(final HttpResponseDecoder.HttpDecodedResponse decodedResponse,
final SwaggerMethodParser methodParser, RequestOptions options, EnumSet<ErrorOptions> errorOptions) {
final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)
|| (options != null && errorOptions.contains(ErrorOptions.NO_THROW))) {
return decodedResponse;
}
Exception e;
BinaryData responseData = decodedResponse.getSourceResponse().getBodyAsBinaryData();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), null, null);
} else {
Object decodedBody = decodedResponse.getDecodedBodySync(responseBytes);
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), responseBytes, decodedBody);
}
if (e instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) e);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private Object handleRestResponseReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser,
final Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
if (entityType.equals(StreamResponse.class)) {
return createResponse(response, entityType, null);
}
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
response.getSourceResponse().close();
return createResponse(response, entityType, null);
} else {
Object bodyAsObject = handleBodyReturnType(response, methodParser, bodyType);
Response<?> httpResponse = createResponse(response, entityType, bodyAsObject);
if (httpResponse == null) {
return createResponse(response, entityType, null);
}
return httpResponse;
}
} else {
return handleBodyReturnType(response, methodParser, entityType);
}
}
private Object handleBodyReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser, final Type entityType) {
final int responseStatusCode = response.getSourceResponse().getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(
entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
boolean isSuccess = (responseStatusCode / 100) == 2;
result = isSuccess;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
BinaryData binaryData = response.getSourceResponse().getBodyAsBinaryData();
byte[] responseBodyBytes = binaryData != null ? binaryData.toBytes() : null;
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes != null ? (responseBodyBytes.length == 0 ? null : responseBodyBytes) : null;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = response.getSourceResponse().getBodyAsBinaryData();
} else {
result = response.getDecodedBodySync((byte[]) null);
}
return result;
}
/**
* Handle the provided asynchronous HTTP response and return the deserialized value.
*
* @param httpDecodedResponse the asynchronous HTTP response to the original HTTP request
* @param methodParser the SwaggerMethodParser that the request originates from
* @param returnType the type of value that will be returned
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return the deserialized result
*/
private Object handleRestReturnType(final HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse,
final SwaggerMethodParser methodParser,
final Type returnType,
final Context context,
final RequestOptions options,
EnumSet<ErrorOptions> errorOptions) {
final HttpResponseDecoder.HttpDecodedResponse expectedResponse =
ensureExpectedStatus(httpDecodedResponse, methodParser, options, errorOptions);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType,
Void.class)) {
result = expectedResponse;
} else {
result = handleRestResponseReturnType(httpDecodedResponse, methodParser, returnType);
}
return result;
}
} |
Instead of using an output stream we could just call `serializerAdapter.serializeToBytes` which may remove duplicating data when writing to the output stream. | public void updateRequest(RequestDataConfiguration requestDataConfiguration, SerializerAdapter serializerAdapter) throws IOException {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (isJson) {
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.JSON, stream);
request.setHeader("Content-Length", String.valueOf(stream.size()));
request.setBody(BinaryData.fromStream(new ByteArrayInputStream(stream.toByteArray(), 0, stream.size())));
} else if (bodyContentObject instanceof byte[]) {
request.setBody((byte[]) bodyContentObject);
} else if (bodyContentObject instanceof String) {
final String bodyContentString = (String) bodyContentObject;
if (!bodyContentString.isEmpty()) {
request.setBody(bodyContentString);
}
} else if (bodyContentObject instanceof ByteBuffer) {
request.setBody(((ByteBuffer) bodyContentObject).array());
} else {
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream);
request.setHeader("Content-Length", String.valueOf(stream.size()));
request.setBody(stream.toByteArray());
}
} | serializerAdapter.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); | public void updateRequest(RequestDataConfiguration requestDataConfiguration, SerializerAdapter serializerAdapter) throws IOException {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (isJson) {
byte[] serializedBytes = serializerAdapter.serializeToBytes(bodyContentObject, SerializerEncoding.JSON);
ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream();
serializerAdapter.serialize(bodyContentObject, SerializerEncoding.JSON, stream);
request.setHeader("Content-Length", String.valueOf(serializedBytes.length));
request.setBody(BinaryData.fromBytes(serializedBytes));
} else if (bodyContentObject instanceof byte[]) {
request.setBody((byte[]) bodyContentObject);
} else if (bodyContentObject instanceof String) {
final String bodyContentString = (String) bodyContentObject;
if (!bodyContentString.isEmpty()) {
request.setBody(bodyContentString);
}
} else if (bodyContentObject instanceof ByteBuffer) {
if (((ByteBuffer) bodyContentObject).hasArray()) {
request.setBody(((ByteBuffer) bodyContentObject).array());
} else {
byte[] array = new byte[((ByteBuffer) bodyContentObject).remaining()];
((ByteBuffer) bodyContentObject).get(array);
request.setBody(array);
}
} else {
byte[] serializedBytes = serializerAdapter
.serializeToBytes(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()));
request.setHeader("Content-Length", String.valueOf(serializedBytes.length));
request.setBody(serializedBytes);
}
} | class SyncRestProxy extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
*/
public SyncRestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public HttpResponse send(HttpRequest request, Context contextData) {
return httpPipeline.sendSync(request, contextData);
}
public HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
return createHttpRequestBase(methodParser, serializer, false, args);
}
@Override
public Object invoke(Object proxy, Method method, RequestOptions options, EnumSet<ErrorOptions> errorOptions, Consumer<HttpRequest> requestCallback, SwaggerMethodParser methodParser, HttpRequest request, Context context) {
HttpResponseDecoder.HttpDecodedResponse decodedResponse = null;
Throwable throwable = null;
try {
context = startTracingSpan(method, context);
if (options != null && requestCallback != null) {
requestCallback.accept(request);
}
if (request.getBodyAsBinaryData() != null) {
request.setBody(RestProxyUtils.validateLengthSync(request));
}
final HttpResponse response = send(request, context);
decodedResponse = this.decoder.decodeSync(response, methodParser);
return handleRestReturnType(decodedResponse, methodParser, methodParser.getReturnType(), context, options, errorOptions);
} catch (RuntimeException e) {
throwable = e;
throw LOGGER.logExceptionAsError(e);
} finally {
if (decodedResponse != null || throwable != null) {
endTracingSpan(decodedResponse, throwable, context);
}
}
}
/**
* Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing
* additional context information.
*
* @param method Service method being called.
* @param context Context information about the current service call.
* @return The updated context containing the span context.
*/
private Context startTracingSpan(Method method, Context context) {
if (!TracerProxy.isTracingEnabled()) {
return context;
}
if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) {
return context;
}
String spanName = interfaceParser.getServiceName() + "." + method.getName();
context = TracerProxy.setSpanName(spanName, context);
return TracerProxy.start(spanName, context);
}
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.
*
* @param decodedResponse The HttpResponse to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
* @return An async-version of the provided decodedResponse.
*/
private HttpResponseDecoder.HttpDecodedResponse ensureExpectedStatus(final HttpResponseDecoder.HttpDecodedResponse decodedResponse,
final SwaggerMethodParser methodParser, RequestOptions options, EnumSet<ErrorOptions> errorOptions) {
final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)
|| (options != null && errorOptions.contains(ErrorOptions.NO_THROW))) {
return decodedResponse;
}
Exception e;
BinaryData responseData = decodedResponse.getSourceResponse().getBodyAsBinaryData();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), null, null);
} else {
Object decodedBody = decodedResponse.getDecodedBodySync(responseBytes);
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), responseBytes, decodedBody);
}
if (e instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) e);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private Object handleRestResponseReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser,
final Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
if (entityType.equals(StreamResponse.class)) {
return createResponse(response, entityType, null);
}
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
response.getSourceResponse().close();
return createResponse(response, entityType, null);
} else {
Object bodyAsObject = handleBodyReturnType(response, methodParser, bodyType);
Response<?> httpResponse = createResponse(response, entityType, bodyAsObject);
if (httpResponse == null) {
return createResponse(response, entityType, null);
}
return httpResponse;
}
} else {
return handleBodyReturnType(response, methodParser, entityType);
}
}
private Object handleBodyReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser, final Type entityType) {
final int responseStatusCode = response.getSourceResponse().getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(
entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
boolean isSuccess = (responseStatusCode / 100) == 2;
result = isSuccess;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
byte[] responseBodyBytes = response.getSourceResponse().getBodyAsBinaryData().toBytes();
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = response.getSourceResponse().getBodyAsBinaryData();
} else {
result = response.getDecodedBodySync((byte[]) null);
}
return result;
}
/**
* Handle the provided asynchronous HTTP response and return the deserialized value.
*
* @param httpDecodedResponse the asynchronous HTTP response to the original HTTP request
* @param methodParser the SwaggerMethodParser that the request originates from
* @param returnType the type of value that will be returned
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return the deserialized result
*/
private Object handleRestReturnType(final HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse,
final SwaggerMethodParser methodParser,
final Type returnType,
final Context context,
final RequestOptions options,
EnumSet<ErrorOptions> errorOptions) {
final HttpResponseDecoder.HttpDecodedResponse expectedResponse =
ensureExpectedStatus(httpDecodedResponse, methodParser, options, errorOptions);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType,
Void.class)) {
result = expectedResponse;
} else {
result = handleRestResponseReturnType(httpDecodedResponse, methodParser, returnType);
}
return result;
}
private static void endTracingSpan(HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse, Throwable throwable, Context tracingContext) {
if (tracingContext == null) {
return;
}
Object disableTracingValue = (tracingContext.getData(Tracer.DISABLE_TRACING_KEY).isPresent()
? tracingContext.getData(Tracer.DISABLE_TRACING_KEY).get() : null);
boolean disableTracing = Boolean.TRUE.equals(disableTracingValue != null ? disableTracingValue : false);
if (disableTracing) {
return;
}
int statusCode = 0;
if (httpDecodedResponse != null) {
statusCode = httpDecodedResponse.getSourceResponse().getStatusCode();
} else if (throwable != null) {
if (throwable instanceof HttpResponseException) {
HttpResponseException exception = (HttpResponseException) throwable;
statusCode = exception.getResponse().getStatusCode();
}
}
TracerProxy.end(statusCode, throwable, tracingContext);
}
} | class SyncRestProxy extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
*/
public SyncRestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
HttpResponse send(HttpRequest request, Context contextData) {
return httpPipeline.sendSync(request, contextData);
}
public HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
return createHttpRequest(methodParser, serializer, false, args);
}
@Override
public Object invoke(Object proxy, Method method, RequestOptions options, EnumSet<ErrorOptions> errorOptions, Consumer<HttpRequest> requestCallback, SwaggerMethodParser methodParser, HttpRequest request, Context context) {
HttpResponseDecoder.HttpDecodedResponse decodedResponse = null;
Throwable throwable = null;
try {
context = startTracingSpan(method, context);
if (options != null && requestCallback != null) {
requestCallback.accept(request);
}
if (request.getBodyAsBinaryData() != null) {
request.setBody(RestProxyUtils.validateLengthSync(request));
}
final HttpResponse response = send(request, context);
decodedResponse = this.decoder.decodeSync(response, methodParser);
return handleRestReturnType(decodedResponse, methodParser, methodParser.getReturnType(), context, options, errorOptions);
} catch (RuntimeException e) {
throwable = e;
throw LOGGER.logExceptionAsError(e);
} finally {
if (decodedResponse != null || throwable != null) {
endTracingSpan(decodedResponse, throwable, context);
}
}
}
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.
*
* @param decodedResponse The HttpResponse to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
* @return An async-version of the provided decodedResponse.
*/
private HttpResponseDecoder.HttpDecodedResponse ensureExpectedStatus(final HttpResponseDecoder.HttpDecodedResponse decodedResponse,
final SwaggerMethodParser methodParser, RequestOptions options, EnumSet<ErrorOptions> errorOptions) {
final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)
|| (options != null && errorOptions.contains(ErrorOptions.NO_THROW))) {
return decodedResponse;
}
Exception e;
BinaryData responseData = decodedResponse.getSourceResponse().getBodyAsBinaryData();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), null, null);
} else {
Object decodedBody = decodedResponse.getDecodedBodySync(responseBytes);
e = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
decodedResponse.getSourceResponse(), responseBytes, decodedBody);
}
if (e instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) e);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private Object handleRestResponseReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser,
final Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
if (entityType.equals(StreamResponse.class)) {
return createResponse(response, entityType, null);
}
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
response.getSourceResponse().close();
return createResponse(response, entityType, null);
} else {
Object bodyAsObject = handleBodyReturnType(response, methodParser, bodyType);
Response<?> httpResponse = createResponse(response, entityType, bodyAsObject);
if (httpResponse == null) {
return createResponse(response, entityType, null);
}
return httpResponse;
}
} else {
return handleBodyReturnType(response, methodParser, entityType);
}
}
private Object handleBodyReturnType(final HttpResponseDecoder.HttpDecodedResponse response,
final SwaggerMethodParser methodParser, final Type entityType) {
final int responseStatusCode = response.getSourceResponse().getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(
entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
boolean isSuccess = (responseStatusCode / 100) == 2;
result = isSuccess;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
BinaryData binaryData = response.getSourceResponse().getBodyAsBinaryData();
byte[] responseBodyBytes = binaryData != null ? binaryData.toBytes() : null;
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes != null ? (responseBodyBytes.length == 0 ? null : responseBodyBytes) : null;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = response.getSourceResponse().getBodyAsBinaryData();
} else {
result = response.getDecodedBodySync((byte[]) null);
}
return result;
}
/**
* Handle the provided asynchronous HTTP response and return the deserialized value.
*
* @param httpDecodedResponse the asynchronous HTTP response to the original HTTP request
* @param methodParser the SwaggerMethodParser that the request originates from
* @param returnType the type of value that will be returned
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return the deserialized result
*/
private Object handleRestReturnType(final HttpResponseDecoder.HttpDecodedResponse httpDecodedResponse,
final SwaggerMethodParser methodParser,
final Type returnType,
final Context context,
final RequestOptions options,
EnumSet<ErrorOptions> errorOptions) {
final HttpResponseDecoder.HttpDecodedResponse expectedResponse =
ensureExpectedStatus(httpDecodedResponse, methodParser, options, errorOptions);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType,
Void.class)) {
result = expectedResponse;
} else {
result = handleRestResponseReturnType(httpDecodedResponse, methodParser, returnType);
}
return result;
}
} |
Perhaps we should do `final SwaggerMethodParser methodParser = getMethodParser(method, args);` if processing request options is essential ? | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | RequestOptions options = methodParser.setRequestOptions(args); | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
we need both the parser and the request options here to be passed to downstream. | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | RequestOptions options = methodParser.setRequestOptions(args); | public Object invoke(Object proxy, final Method method, Object[] args) {
RestProxyUtils.validateResumeOperationIsNotPresent(method);
final SwaggerMethodParser methodParser = getMethodParser(method);
RequestOptions options = methodParser.setRequestOptions(args);
boolean isReactive = methodParser.isReactive();
if (isReactive) {
return asyncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
} else {
return syncRestProxy.invoke(proxy, method, options, options != null ? options.getErrorOptions() : null,
options != null ? options.getRequestCallback() : null, methodParser, isReactive, args);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} | class RestProxy implements InvocationHandler {
private final SwaggerInterfaceParser interfaceParser;
private final AsyncRestProxy asyncRestProxy;
private final HttpPipeline httpPipeline;
private final SyncRestProxy syncRestProxy;
/**
* Create a RestProxy.
*
* @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer the serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser the parser that contains information about the interface describing REST API methods that
* this RestProxy "implements".
*/
private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
this.interfaceParser = interfaceParser;
this.asyncRestProxy = new AsyncRestProxy(httpPipeline, serializer, interfaceParser);
this.syncRestProxy = new SyncRestProxy(httpPipeline, serializer, interfaceParser);
this.httpPipeline = httpPipeline;
}
/**
* Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this
* RestProxy was created to "implement".
*
* @param method the method to get a SwaggerMethodParser for
* @return the SwaggerMethodParser for the provided method
*/
private SwaggerMethodParser getMethodParser(Method method) {
return interfaceParser.getMethodParser(method);
}
/**
* Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send
* @param contextData the context
* @return a {@link Mono} that emits HttpResponse asynchronously
*/
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
return httpPipeline.send(request, contextData);
}
@Override
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface) {
return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
* @param <A> the type of the Swagger interface
* @return a proxy implementation of the provided Swagger interface
*/
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer());
}
/**
* Create a proxy implementation of the provided Swagger interface.
*
* @param swaggerInterface the Swagger interface to provide a proxy implementation for
* @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests
* @param serializer the serializer that will be used to convert POJOs to and from request and response bodies
* @param <A> the type of the Swagger interface.
* @return a proxy implementation of the provided Swagger interface
*/
@SuppressWarnings("unchecked")
public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
restProxy);
}
} |
extract to two local variables | public void userAgentTest(CapturedOutput output) {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AzureAppConfigurationAutoConfiguration.class))
.withPropertyValues(
"spring.cloud.azure.profile.tenant-id=sample",
"spring.cloud.azure.credential.client-id=sample",
"spring.cloud.azure.credential.client-secret=sample",
"spring.cloud.azure.appconfiguration.enabled=true",
"spring.cloud.azure.appconfiguration.endpoint=https:
"spring.cloud.azure.appconfiguration.client.logging.level=headers",
"spring.cloud.azure.appconfiguration.client.logging.allowed-header-names=User-Agent",
"spring.cloud.azure.appconfiguration.retry.fixed.delay=1",
"spring.cloud.azure.appconfiguration.retry.fixed.max-retries=0",
"spring.cloud.azure.appconfiguration.retry.mode=fixed"
)
.withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
.run(context -> {
assertThat(context).hasSingleBean(AzureAppConfigurationAutoConfiguration.class);
assertThat(context).hasSingleBean(AzureAppConfigurationProperties.class);
assertThat(context).hasSingleBean(ConfigurationClient.class);
assertThat(context).hasSingleBean(ConfigurationAsyncClient.class);
assertThat(context).hasSingleBean(ConfigurationClientBuilder.class);
assertThat(context).hasSingleBean(ConfigurationClientBuilderFactory.class);
ConfigurationClient configurationClient = context.getBean(ConfigurationClient.class);
ConfigurationSetting configurationSetting = new ConfigurationSetting();
configurationSetting.setKey("key1");
try {
configurationClient.getConfigurationSetting(configurationSetting);
} catch (Exception exception) {
}
String allOutput = output.getAll();
assertTrue(allOutput.contains(String.format("User-Agent:%s",
AzureSpringIdentifier.AZURE_SPRING_APP_CONFIG)) || allOutput.contains(String.format("\"User-Agent"
+ "\":\"%s", AzureSpringIdentifier.AZURE_SPRING_APP_CONFIG)));
});
} | + "\":\"%s", AzureSpringIdentifier.AZURE_SPRING_APP_CONFIG))); | public void userAgentTest(CapturedOutput output) {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AzureAppConfigurationAutoConfiguration.class))
.withPropertyValues(
"spring.cloud.azure.profile.tenant-id=sample",
"spring.cloud.azure.credential.client-id=sample",
"spring.cloud.azure.credential.client-secret=sample",
"spring.cloud.azure.appconfiguration.enabled=true",
"spring.cloud.azure.appconfiguration.endpoint=https:
"spring.cloud.azure.appconfiguration.client.logging.level=headers",
"spring.cloud.azure.appconfiguration.client.logging.allowed-header-names=User-Agent",
"spring.cloud.azure.appconfiguration.retry.fixed.delay=1",
"spring.cloud.azure.appconfiguration.retry.fixed.max-retries=0",
"spring.cloud.azure.appconfiguration.retry.mode=fixed"
)
.withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
.run(context -> {
assertThat(context).hasSingleBean(AzureAppConfigurationAutoConfiguration.class);
assertThat(context).hasSingleBean(AzureAppConfigurationProperties.class);
assertThat(context).hasSingleBean(ConfigurationClient.class);
assertThat(context).hasSingleBean(ConfigurationAsyncClient.class);
assertThat(context).hasSingleBean(ConfigurationClientBuilder.class);
assertThat(context).hasSingleBean(ConfigurationClientBuilderFactory.class);
ConfigurationClient configurationClient = context.getBean(ConfigurationClient.class);
ConfigurationSetting configurationSetting = new ConfigurationSetting();
configurationSetting.setKey("key1");
try {
configurationClient.getConfigurationSetting(configurationSetting);
} catch (Exception exception) {
}
String allOutput = output.getAll();
String format1 = String.format("User-Agent:%s", AzureSpringIdentifier.AZURE_SPRING_APP_CONFIG);
String format2 = String.format("\"User-Agent\":\"%s", AzureSpringIdentifier.AZURE_SPRING_APP_CONFIG);
assertTrue(allOutput.contains(format1) || allOutput.contains(format2));
});
} | class AppConfigurationUserAgentTests {
@Test
} | class AppConfigurationUserAgentTests {
@Test
} |
same here | public void userAgentTest(CapturedOutput output) {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AzureKeyVaultCertificateAutoConfiguration.class))
.withPropertyValues(
"spring.cloud.azure.profile.tenant-id=sample",
"spring.cloud.azure.credential.client-id=sample",
"spring.cloud.azure.credential.client-secret=sample",
"spring.cloud.azure.keyvault.certificate.enabled=true",
"spring.cloud.azure.keyvault.certificate.endpoint=https:
"spring.cloud.azure.keyvault.certificate.client.logging.level=headers",
"spring.cloud.azure.keyvault.certificate.client.logging.allowed-header-names=User-Agent",
"spring.cloud.azure.keyvault.certificate.retry.fixed.delay=1s",
"spring.cloud.azure.keyvault.certificate.retry.fixed.max-retries=0",
"spring.cloud.azure.keyvault.certificate.retry.mode=fixed"
)
.withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
.run(context -> {
assertThat(context).hasSingleBean(AzureKeyVaultCertificateAutoConfiguration.class);
assertThat(context).hasSingleBean(AzureKeyVaultCertificateProperties.class);
assertThat(context).hasSingleBean(CertificateClient.class);
assertThat(context).hasSingleBean(CertificateAsyncClient.class);
assertThat(context).hasSingleBean(CertificateClientBuilder.class);
assertThat(context).hasSingleBean(CertificateClientBuilderFactory.class);
CertificateClient certificateClient = context.getBean(CertificateClient.class);
try {
certificateClient.getCertificate("test");
} catch (Exception exception) {
}
String allOutput = output.getAll();
assertTrue(allOutput.contains(String.format("User-Agent:%s",
AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_CERTIFICATES)) || allOutput.contains(String.format(
"\"User-Agent\":\"%s", AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_CERTIFICATES)));
});
} | }); | public void userAgentTest(CapturedOutput output) {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AzureKeyVaultCertificateAutoConfiguration.class))
.withPropertyValues(
"spring.cloud.azure.profile.tenant-id=sample",
"spring.cloud.azure.credential.client-id=sample",
"spring.cloud.azure.credential.client-secret=sample",
"spring.cloud.azure.keyvault.certificate.enabled=true",
"spring.cloud.azure.keyvault.certificate.endpoint=https:
"spring.cloud.azure.keyvault.certificate.client.logging.level=headers",
"spring.cloud.azure.keyvault.certificate.client.logging.allowed-header-names=User-Agent",
"spring.cloud.azure.keyvault.certificate.retry.fixed.delay=1s",
"spring.cloud.azure.keyvault.certificate.retry.fixed.max-retries=0",
"spring.cloud.azure.keyvault.certificate.retry.mode=fixed"
)
.withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
.run(context -> {
assertThat(context).hasSingleBean(AzureKeyVaultCertificateAutoConfiguration.class);
assertThat(context).hasSingleBean(AzureKeyVaultCertificateProperties.class);
assertThat(context).hasSingleBean(CertificateClient.class);
assertThat(context).hasSingleBean(CertificateAsyncClient.class);
assertThat(context).hasSingleBean(CertificateClientBuilder.class);
assertThat(context).hasSingleBean(CertificateClientBuilderFactory.class);
CertificateClient certificateClient = context.getBean(CertificateClient.class);
try {
certificateClient.getCertificate("test");
} catch (Exception exception) {
}
String allOutput = output.getAll();
String format1 = String.format("User-Agent:%s",
AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_CERTIFICATES);
String format2 = String.format("\"User-Agent\":\"%s",
AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_CERTIFICATES);
assertTrue(allOutput.contains(format1) || allOutput.contains(format2));
});
} | class KeyVaultCertificateUserAgentTests {
@Test
} | class KeyVaultCertificateUserAgentTests {
@Test
} |
same here | public void userAgentTest(CapturedOutput output) {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AzureKeyVaultSecretAutoConfiguration.class))
.withPropertyValues(
"spring.cloud.azure.profile.tenant-id=sample",
"spring.cloud.azure.credential.client-id=sample",
"spring.cloud.azure.credential.client-secret=sample",
"spring.cloud.azure.keyvault.secret.enabled=true",
"spring.cloud.azure.keyvault.secret.endpoint=https:
"spring.cloud.azure.keyvault.secret.client.logging.level=headers",
"spring.cloud.azure.keyvault.secret.client.logging.allowed-header-names=User-Agent",
"spring.cloud.azure.keyvault.secret.retry.fixed.delay=1",
"spring.cloud.azure.keyvault.secret.retry.fixed.max-retries=0",
"spring.cloud.azure.keyvault.secret.retry.mode=fixed"
)
.withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
.run(context -> {
assertThat(context).hasSingleBean(AzureKeyVaultSecretAutoConfiguration.class);
assertThat(context).hasSingleBean(AzureKeyVaultSecretProperties.class);
assertThat(context).hasSingleBean(SecretClientBuilderFactory.class);
assertThat(context).hasSingleBean(SecretClientBuilder.class);
assertThat(context).hasSingleBean(SecretClient.class);
assertThat(context).hasSingleBean(SecretAsyncClient.class);
SecretClient secretClient = context.getBean(SecretClient.class);
try {
secretClient.getSecret("name1");
} catch (Exception exception) {
}
String allOutput = output.getAll();
assertTrue(allOutput.contains(String.format("User-Agent:%s",
AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_SECRETS)) || allOutput.contains(String.format(
"\"User-Agent\":\"%s", AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_SECRETS)));
});
} | "\"User-Agent\":\"%s", AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_SECRETS))); | public void userAgentTest(CapturedOutput output) {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(AzureKeyVaultSecretAutoConfiguration.class))
.withPropertyValues(
"spring.cloud.azure.profile.tenant-id=sample",
"spring.cloud.azure.credential.client-id=sample",
"spring.cloud.azure.credential.client-secret=sample",
"spring.cloud.azure.keyvault.secret.enabled=true",
"spring.cloud.azure.keyvault.secret.endpoint=https:
"spring.cloud.azure.keyvault.secret.client.logging.level=headers",
"spring.cloud.azure.keyvault.secret.client.logging.allowed-header-names=User-Agent",
"spring.cloud.azure.keyvault.secret.retry.fixed.delay=1",
"spring.cloud.azure.keyvault.secret.retry.fixed.max-retries=0",
"spring.cloud.azure.keyvault.secret.retry.mode=fixed"
)
.withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
.run(context -> {
assertThat(context).hasSingleBean(AzureKeyVaultSecretAutoConfiguration.class);
assertThat(context).hasSingleBean(AzureKeyVaultSecretProperties.class);
assertThat(context).hasSingleBean(SecretClientBuilderFactory.class);
assertThat(context).hasSingleBean(SecretClientBuilder.class);
assertThat(context).hasSingleBean(SecretClient.class);
assertThat(context).hasSingleBean(SecretAsyncClient.class);
SecretClient secretClient = context.getBean(SecretClient.class);
try {
secretClient.getSecret("name1");
} catch (Exception exception) {
}
String allOutput = output.getAll();
String format1 = String.format("User-Agent:%s", AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_SECRETS);
String format2 = String.format("\"User-Agent\":\"%s",
AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_SECRETS);
assertTrue(allOutput.contains(format1) || allOutput.contains(format2));
});
} | class KeyVaultSecretUserAgentTests {
@Test
} | class KeyVaultSecretUserAgentTests {
@Test
} |
```suggestion "'progressListener' must not be null"); ``` | private ProgressReporter(ProgressListener progressListener) {
this.progressListener = Objects.requireNonNull(progressListener,
"'progressReceiver' must not be null");
this.parent = null;
} | "'progressReceiver' must not be null"); | private ProgressReporter(ProgressListener progressListener) {
this.progressListener = Objects.requireNonNull(progressListener,
"'progressListener' must not be null");
this.parent = null;
} | class ProgressReporter {
private final ProgressListener progressListener;
private final ProgressReporter parent;
private static final AtomicLongFieldUpdater<ProgressReporter> PROGRESS_ATOMIC_UPDATER =
AtomicLongFieldUpdater.newUpdater(ProgressReporter.class, "progress");
private volatile long progress;
/**
* Creates top level {@link ProgressReporter}.
* Only top level {@link ProgressReporter} can have {@link ProgressListener}.
* @param progressListener The {@link ProgressListener} to be notified about progress.
*/
/**
* Creates child {@link ProgressReporter}. It tracks it's own progress and reports to parent.
* @param parent The parent {@link ProgressReporter}. Must not be null.
*/
private ProgressReporter(ProgressReporter parent) {
this.parent = Objects.requireNonNull(parent,
"'parent' must not be null");
this.progressListener = null;
}
/**
* Creates a {@link ProgressReporter} that notifies {@link ProgressListener}.
* @param progressListener The {@link ProgressListener} to be notified about progress. Must not be null.
* @return The {@link ProgressReporter} instance.
* @throws NullPointerException If {@code progressReceiver} is null.
*/
public static ProgressReporter withProgressListener(ProgressListener progressListener) {
return new ProgressReporter(progressListener);
}
/**
* Creates child {@link ProgressReporter} that can be used to track sub-progress when tracked activity spans
* across concurrent processes. Child {@link ProgressReporter} notifies parent about progress and
* parent notifies {@link ProgressListener}.
* @return The child {@link ProgressReporter}.
*/
public ProgressReporter createChild() {
return new ProgressReporter(this);
}
/**
* Resets progress to zero and notifies.
* <p>
* If this is a root {@link ProgressReporter} then attached {@link ProgressListener} is notified.
* Otherwise, already accumulated progress is subtracted from the parent {@link ProgressReporter}'s progress.
* </p>
*/
public void reset() {
long accumulated = PROGRESS_ATOMIC_UPDATER.getAndSet(this, 0L);
if (parent != null) {
parent.reportProgress(-1L * accumulated);
}
if (progressListener != null) {
progressListener.handleProgress(0L);
}
}
/**
* Accumulates the provided {@code progress} and notifies.
*
* <p>
* If this is a root {@link ProgressReporter}
* then attached {@link ProgressListener} is notified about accumulated progress.
* Otherwise, the provided {@code progress} is reported to the parent {@link ProgressReporter}.
* </p>
*
* @param progress The number to be accumulated.
*/
public void reportProgress(long progress) {
long totalProgress = PROGRESS_ATOMIC_UPDATER.addAndGet(this, progress);
if (parent != null) {
parent.reportProgress(progress);
}
if (progressListener != null) {
progressListener.handleProgress(totalProgress);
}
}
} | class ProgressReporter {
private final ProgressListener progressListener;
private final ProgressReporter parent;
private static final AtomicLongFieldUpdater<ProgressReporter> PROGRESS_ATOMIC_UPDATER =
AtomicLongFieldUpdater.newUpdater(ProgressReporter.class, "progress");
private volatile long progress;
/**
* Creates top level {@link ProgressReporter}.
* Only top level {@link ProgressReporter} can have {@link ProgressListener}.
* @param progressListener The {@link ProgressListener} to be notified about progress.
*/
/**
* Creates child {@link ProgressReporter}. It tracks it's own progress and reports to parent.
* @param parent The parent {@link ProgressReporter}. Must not be null.
*/
private ProgressReporter(ProgressReporter parent) {
this.parent = Objects.requireNonNull(parent,
"'parent' must not be null");
this.progressListener = null;
}
/**
* Creates a {@link ProgressReporter} that notifies {@link ProgressListener}.
* @param progressListener The {@link ProgressListener} to be notified about progress. Must not be null.
* @return The {@link ProgressReporter} instance.
* @throws NullPointerException If {@code progressReceiver} is null.
*/
public static ProgressReporter withProgressListener(ProgressListener progressListener) {
return new ProgressReporter(progressListener);
}
/**
* Creates child {@link ProgressReporter} that can be used to track sub-progress when tracked activity spans
* across concurrent processes. Child {@link ProgressReporter} notifies parent about progress and
* parent notifies {@link ProgressListener}.
* @return The child {@link ProgressReporter}.
*/
public ProgressReporter createChild() {
return new ProgressReporter(this);
}
/**
* Resets progress to zero and notifies.
* <p>
* If this is a root {@link ProgressReporter} then attached {@link ProgressListener} is notified.
* Otherwise, already accumulated progress is subtracted from the parent {@link ProgressReporter}'s progress.
* </p>
*/
public void reset() {
long accumulated = PROGRESS_ATOMIC_UPDATER.getAndSet(this, 0L);
if (parent != null) {
parent.reportProgress(-1L * accumulated);
}
if (progressListener != null) {
progressListener.handleProgress(0L);
}
}
/**
* Accumulates the provided {@code progress} and notifies.
*
* <p>
* If this is a root {@link ProgressReporter}
* then attached {@link ProgressListener} is notified about accumulated progress.
* Otherwise, the provided {@code progress} is reported to the parent {@link ProgressReporter}.
* </p>
*
* @param progress The number to be accumulated.
*/
public void reportProgress(long progress) {
long totalProgress = PROGRESS_ATOMIC_UPDATER.addAndGet(this, progress);
if (parent != null) {
parent.reportProgress(progress);
}
if (progressListener != null) {
progressListener.handleProgress(totalProgress);
}
}
} |
This sample passes LIVE test | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(RESOURCE_GROUP_NAME).withRegion(REGION).create();
System.out.println("Resource group created with name " + RESOURCE_GROUP_NAME);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(EVENT_HUB_NAMESPACE)
.withRegion(REGION)
.withExistingResourceGroup(RESOURCE_GROUP_NAME)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(EVENT_HUB_NAME)
.withExistingNamespace(RESOURCE_GROUP_NAME, EVENT_HUB_NAMESPACE)
.withNewManageRule(EVENT_HUB_RULE_NAME)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(TOPIC_NAME)
.withRegion(REGION)
.withExistingResourceGroup(RESOURCE_GROUP_NAME)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.topicEventSubscriptions()
.define(EVENT_SUBSCRIPTION_NAME)
.withExistingTopic(resourceGroup.name(), eventGridTopic.name())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(RESOURCE_GROUP_NAME, TOPIC_NAME).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n", eventData.getSequenceNumber(), contents);
}, error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(RESOURCE_GROUP_NAME);
}
} | EventSubscription eventSubscription = eventGridManager.topicEventSubscriptions() | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(RESOURCE_GROUP_NAME).withRegion(REGION).create();
System.out.println("Resource group created with name " + RESOURCE_GROUP_NAME);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(EVENT_HUB_NAMESPACE)
.withRegion(REGION)
.withExistingResourceGroup(RESOURCE_GROUP_NAME)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(EVENT_HUB_NAME)
.withExistingNamespace(RESOURCE_GROUP_NAME, EVENT_HUB_NAMESPACE)
.withNewManageRule(EVENT_HUB_RULE_NAME)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(TOPIC_NAME)
.withRegion(REGION)
.withExistingResourceGroup(RESOURCE_GROUP_NAME)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.topicEventSubscriptions()
.define(EVENT_SUBSCRIPTION_NAME)
.withExistingTopic(resourceGroup.name(), eventGridTopic.name())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(RESOURCE_GROUP_NAME, TOPIC_NAME).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n", eventData.getSequenceNumber(), contents);
}, error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(RESOURCE_GROUP_NAME);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region REGION = Region.US_CENTRAL;
private static final String RESOURCE_GROUP_NAME = "rg" + randomPadding();
private static final String EVENT_HUB_NAME = "eh" + randomPadding();
private static final String EVENT_HUB_NAMESPACE = "ehNamespace" + randomPadding();
private static final String TOPIC_NAME = "myTopicName" + randomPadding();
private static final String EVENT_SUBSCRIPTION_NAME = "eventSubscription" + randomPadding();
private static final String EVENT_HUB_RULE_NAME = "myManagementRule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region REGION = Region.US_CENTRAL;
private static final String RESOURCE_GROUP_NAME = "rg" + randomPadding();
private static final String EVENT_HUB_NAME = "eh" + randomPadding();
private static final String EVENT_HUB_NAMESPACE = "ehNamespace" + randomPadding();
private static final String TOPIC_NAME = "myTopicName" + randomPadding();
private static final String EVENT_SUBSCRIPTION_NAME = "eventSubscription" + randomPadding();
private static final String EVENT_HUB_RULE_NAME = "myManagementRule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Could this be replaced with `Collections.emptyList()` so we don't initialise a new array? | private AuthorizationRulesWrapper() {
this.items = new ArrayList<AuthorizationRuleImpl>();
} | this.items = new ArrayList<AuthorizationRuleImpl>(); | private AuthorizationRulesWrapper() {
this.items = Collections.emptyList();
} | class AuthorizationRulesWrapper {
@JacksonXmlProperty(localName = "AuthorizationRule",
namespace = "http:
private final List<AuthorizationRuleImpl> items;
@JsonCreator
@JsonCreator
private AuthorizationRulesWrapper(
@JacksonXmlProperty(localName = "AuthorizationRule",
namespace = "http:
List<AuthorizationRuleImpl> items) {
this.items = items;
}
} | class AuthorizationRulesWrapper {
@JacksonXmlProperty(localName = "AuthorizationRule",
namespace = "http:
private final List<AuthorizationRuleImpl> items;
@JsonCreator
@JsonCreator
private AuthorizationRulesWrapper(
@JacksonXmlProperty(localName = "AuthorizationRule",
namespace = "http:
List<AuthorizationRuleImpl> items) {
this.items = items;
}
} |
Done | private AuthorizationRulesWrapper() {
this.items = new ArrayList<AuthorizationRuleImpl>();
} | this.items = new ArrayList<AuthorizationRuleImpl>(); | private AuthorizationRulesWrapper() {
this.items = Collections.emptyList();
} | class AuthorizationRulesWrapper {
@JacksonXmlProperty(localName = "AuthorizationRule",
namespace = "http:
private final List<AuthorizationRuleImpl> items;
@JsonCreator
@JsonCreator
private AuthorizationRulesWrapper(
@JacksonXmlProperty(localName = "AuthorizationRule",
namespace = "http:
List<AuthorizationRuleImpl> items) {
this.items = items;
}
} | class AuthorizationRulesWrapper {
@JacksonXmlProperty(localName = "AuthorizationRule",
namespace = "http:
private final List<AuthorizationRuleImpl> items;
@JsonCreator
@JsonCreator
private AuthorizationRulesWrapper(
@JacksonXmlProperty(localName = "AuthorizationRule",
namespace = "http:
List<AuthorizationRuleImpl> items) {
this.items = items;
}
} |
Mind trying something out here, and this can be done later. When JsonGenerator is in an array state it should implicitly handle adding `,` between write calls, so the following should work without needing to merge all `serializedLogs` into a single string just for `writeRaw`: ```java serializedLogs.forEach(generator::writeRaw); ``` | private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
} | .collect(Collectors.joining(","))); | private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
} | class LogsIngestionAsyncClient {
// Logger for surfacing validation and serialization failures.
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
// Maximum size of a single request payload, measured on the serialized (pre-gzip) bytes.
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
// Shared default serializer; used when the caller supplies no ObjectSerializer in options.
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
// Package-private: instances are created by the corresponding client builder.
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
 * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
 * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
 * the input logs into multiple smaller requests before sending to the service.
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule that defines the structure of the
 * logs sent in this request.
 * @param logs the collection of logs to be uploaded.
 * @return the result of the logs upload request.
 * @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} is null.
 * @throws IllegalArgumentException if {@code logs} is empty.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
 * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
 * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
 * the input logs into multiple smaller requests before sending to the service.
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule that defines the structure of the
 * logs sent in this request.
 * @param logs the collection of logs to be uploaded.
 * @param options the options to configure the upload request.
 * @return the result of the logs upload request.
 * @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} is null.
 * @throws IllegalArgumentException if {@code logs} is empty.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
// Context-aware overload used internally by the public overloads; defers all work
// (including argument validation in splitAndUpload) until subscription time.
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Validates the input, splits the logs into gzipped requests that fit the service
 * payload limit, uploads them, and aggregates the per-batch outcomes into a single
 * {@link UploadLogsResult}.
 *
 * @throws NullPointerException if {@code dataCollectionRuleId}, {@code streamName} or {@code logs} is null.
 * @throws IllegalArgumentException if {@code logs} is empty.
 */
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        // Bug fix: each argument is now checked itself. Previously 'dataCollectionRuleId'
        // was passed to all three checks, so a null 'streamName' or 'logs' slipped through
        // validation and failed later with an unrelated error.
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        // Each entry in 'logBatches' mirrors one gzipped request in 'requests', in order.
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        // flatMapSequential preserves request order so the batch iterator stays in
        // sync with the responses consumed by mapResult.
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Pairs the next log batch with its upload outcome: a failed upload yields a result
 * carrying an {@link UploadLogsError} that wraps the batch, a successful one carries
 * no errors.
 */
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
    // Advance exactly one batch per response; responses arrive in batch order.
    List<Object> batch = logBatchesIterator.next();
    if (responseHolder.getStatus() != UploadLogsStatus.FAILURE) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsError batchError = new UploadLogsError(responseHolder.getResponseError(), batch);
    return new UploadLogsResult(responseHolder.getStatus(), Arrays.asList(batchError));
}
/**
 * Sends one gzipped request body to the service and folds the outcome into an
 * {@link UploadLogsResponseHolder}; HTTP errors are converted to FAILURE holders
 * instead of propagating.
 */
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
    return service.uploadWithResponse(dataCollectionRuleId, streamName,
        BinaryData.fromBytes(bytes), requestOptions)
        .map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
        .onErrorResume(HttpResponseException.class,
            // fromSupplier defers mapToResponseError until the fallback Mono is
            // subscribed, instead of eagerly building the holder inside the lambda.
            ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
                mapToResponseError(ex))));
}
/**
 * Extracts the service-reported error from an {@link HttpResponseException} whose
 * body deserialized to the shape {@code {"error": {"code": ..., "message": ...}}}.
 *
 * @param ex the {@link HttpResponseException}.
 * @return the mapped {@link ResponseError}, or {@code null} when the body does not
 * match the expected shape.
 */
private ResponseError mapToResponseError(HttpResponseException ex) {
    // Guard-clause style: bail out as soon as the payload stops matching the shape.
    if (!(ex.getValue() instanceof LinkedHashMap<?, ?>)) {
        return null;
    }
    @SuppressWarnings("unchecked")
    LinkedHashMap<String, Object> body = (LinkedHashMap<String, Object>) ex.getValue();
    Object error = body.get("error");
    if (!(error instanceof LinkedHashMap<?, ?>)) {
        return null;
    }
    @SuppressWarnings("unchecked")
    LinkedHashMap<String, String> details = (LinkedHashMap<String, String>) error;
    if (details.containsKey("code") && details.containsKey("message")) {
        return new ResponseError(details.get("code"), details.get("message"));
    }
    return null;
}
/**
 * Aggregates per-batch results into the overall upload result: SUCCESS when no batch
 * failed, FAILURE when every batch failed, PARTIAL_FAILURE otherwise. Errors from all
 * failed batches are collected in order.
 */
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
    // Single pass over 'results' instead of three separate stream traversals.
    List<UploadLogsError> errors = new ArrayList<>();
    int failureCount = 0;
    for (UploadLogsResult result : results) {
        if (result.getStatus() == UploadLogsStatus.FAILURE) {
            failureCount++;
            errors.addAll(result.getErrors());
        }
    }
    // Checked first so an (unreachable in practice) empty list still maps to FAILURE,
    // matching the previous allMatch-on-empty behavior.
    if (failureCount == results.size()) {
        return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
    }
    if (failureCount > 0) {
        return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
    }
    return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
// Serializes each log, packs the serialized fragments into JSON-array payloads whose
// raw (pre-gzip) size stays within MAX_REQUEST_PAYLOAD_SIZE, gzips each payload, and
// records the logs that went into each payload in 'logBatches' (parallel to the
// returned request list).
// NOTE(review): the limit is checked before adding the current log, so a single log
// larger than the limit still ends up in one oversized request — confirm intended.
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
// Pre-serialized JSON fragments for the batch currently being accumulated.
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
// Flush logs [currentBatchStart, i) as one request; log i starts the next batch.
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
// Flush the final (possibly only) batch; skipped only if no log produced bytes.
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
/**
 * Compresses the given request payload with gzip.
 *
 * @param bytes the uncompressed payload.
 * @return the gzip-compressed bytes.
 */
private byte[] gzipRequest(byte[] bytes) {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    // try-with-resources closes (and thereby finishes) the gzip stream before we
    // read the buffer back out.
    try (GZIPOutputStream gzipStream = new GZIPOutputStream(compressed)) {
        gzipStream.write(bytes);
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
    return compressed.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} |
only check requestContext.sessionToken should be enough: ``` if (StringUtils.isNotEmpty(originalSessionToken)) { ISessionToken sessionToken = getLocalSessionToken(request, originalSessionToken, partitionKeyRangeId); request.requestContext.sessionToken = sessionToken; } else { // use ambient session token. ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, partitionKeyRangeId); request.requestContext.sessionToken = sessionToken; } ``` | public void recordResponse(RxDocumentServiceRequest request, StoreResultDiagnostics storeResultDiagnostics, GlobalEndpointManager globalEndpointManager) {
Objects.requireNonNull(request, "request is required and cannot be null.");
Instant responseTime = Instant.now();
StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics();
storeResponseStatistics.requestResponseTimeUTC = responseTime;
storeResponseStatistics.storeResult = storeResultDiagnostics;
storeResponseStatistics.requestOperationType = request.getOperationType();
storeResponseStatistics.requestResourceType = request.getResourceType();
if (request.requestContext.sessionToken == null && request.getOriginalSessionToken() == null) {
storeResponseStatistics.requestSessionToken = null;
} else {
if (request.getOriginalSessionToken() != null) {
storeResponseStatistics.requestSessionToken = request.getOriginalSessionToken();
} else {
storeResponseStatistics.requestSessionToken = SessionTokenHelper.concatPartitionKeyRangeIdWithSessionToken(request.requestContext.resolvedPartitionKeyRange.getId(), request.requestContext.sessionToken.convertToString());
}
}
activityId = request.getActivityId().toString();
URI locationEndPoint = null;
if (request.requestContext != null) {
if (request.requestContext.locationEndpointToRoute != null) {
locationEndPoint = request.requestContext.locationEndpointToRoute;
}
}
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, request.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
if (storeResponseStatistics.requestOperationType == OperationType.Head
|| storeResponseStatistics.requestOperationType == OperationType.HeadFeed) {
this.supplementalResponseStatisticsList.add(storeResponseStatistics);
} else {
this.responseStatisticsList.add(storeResponseStatistics);
}
}
} | if (request.requestContext.sessionToken == null && request.getOriginalSessionToken() == null) { | public void recordResponse(RxDocumentServiceRequest request, StoreResultDiagnostics storeResultDiagnostics, GlobalEndpointManager globalEndpointManager) {
Objects.requireNonNull(request, "request is required and cannot be null.");
Instant responseTime = Instant.now();
StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics();
storeResponseStatistics.requestResponseTimeUTC = responseTime;
storeResponseStatistics.storeResult = storeResultDiagnostics;
storeResponseStatistics.requestOperationType = request.getOperationType();
storeResponseStatistics.requestResourceType = request.getResourceType();
storeResponseStatistics.requestSessionToken = request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);
activityId = request.getActivityId().toString();
URI locationEndPoint = null;
if (request.requestContext != null) {
if (request.requestContext.locationEndpointToRoute != null) {
locationEndPoint = request.requestContext.locationEndpointToRoute;
}
}
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, request.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
if (storeResponseStatistics.requestOperationType == OperationType.Head
|| storeResponseStatistics.requestOperationType == OperationType.HeadFeed) {
this.supplementalResponseStatisticsList.add(storeResponseStatistics);
} else {
this.responseStatisticsList.add(storeResponseStatistics);
}
}
} | class ClientSideRequestStatistics {
private static final int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10;
private final DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig;
private String activityId;
private List<StoreResponseStatistics> responseStatisticsList;
private List<StoreResponseStatistics> supplementalResponseStatisticsList;
private Map<String, AddressResolutionStatistics> addressResolutionStatistics;
private String requestSessionToken;
private List<URI> contactedReplicas;
private Set<URI> failedReplicas;
private Instant requestStartTimeUTC;
private Instant requestEndTimeUTC;
private Set<String> regionsContacted;
private Set<URI> locationEndpointsContacted;
private RetryContext retryContext;
private GatewayStatistics gatewayStatistics;
private MetadataDiagnosticsContext metadataDiagnosticsContext;
private SerializationDiagnosticsContext serializationDiagnosticsContext;
// Initializes an empty statistics holder; start and end timestamps both begin at
// "now" and the end time is advanced as responses are recorded. Collections that
// can be mutated from multiple threads are wrapped in synchronized views.
public ClientSideRequestStatistics(DiagnosticsClientContext diagnosticsClientContext) {
this.diagnosticsClientConfig = diagnosticsClientContext.getConfig();
this.requestStartTimeUTC = Instant.now();
this.requestEndTimeUTC = Instant.now();
this.responseStatisticsList = new ArrayList<>();
this.supplementalResponseStatisticsList = new ArrayList<>();
this.addressResolutionStatistics = new HashMap<>();
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>());
this.failedReplicas = Collections.synchronizedSet(new HashSet<>());
this.regionsContacted = Collections.synchronizedSet(new HashSet<>());
this.locationEndpointsContacted = Collections.synchronizedSet(new HashSet<>());
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext();
this.serializationDiagnosticsContext = new SerializationDiagnosticsContext();
this.retryContext = new RetryContext();
}
// Copy constructor: the collections themselves are copied into fresh (synchronized)
// containers, but the contained elements are shared with the source (shallow copy).
public ClientSideRequestStatistics(ClientSideRequestStatistics toBeCloned) {
this.diagnosticsClientConfig = toBeCloned.diagnosticsClientConfig;
this.requestStartTimeUTC = toBeCloned.requestStartTimeUTC;
this.requestEndTimeUTC = toBeCloned.requestEndTimeUTC;
this.responseStatisticsList = new ArrayList<>(toBeCloned.responseStatisticsList);
this.supplementalResponseStatisticsList = new ArrayList<>(toBeCloned.supplementalResponseStatisticsList);
this.addressResolutionStatistics = new HashMap<>(toBeCloned.addressResolutionStatistics);
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>(toBeCloned.contactedReplicas));
this.failedReplicas = Collections.synchronizedSet(new HashSet<>(toBeCloned.failedReplicas));
this.regionsContacted = Collections.synchronizedSet(new HashSet<>(toBeCloned.regionsContacted));
this.locationEndpointsContacted = Collections.synchronizedSet(
new HashSet<>(toBeCloned.locationEndpointsContacted));
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext(toBeCloned.metadataDiagnosticsContext);
this.serializationDiagnosticsContext =
new SerializationDiagnosticsContext(toBeCloned.serializationDiagnosticsContext);
this.retryContext = new RetryContext(toBeCloned.retryContext);
}
// Wall-clock span between the recorded request start and the latest recorded response.
public Duration getDuration() {
return Duration.between(requestStartTimeUTC, requestEndTimeUTC);
}
public Instant getRequestStartTimeUTC() {
return requestStartTimeUTC;
}
public DiagnosticsClientContext.DiagnosticsClientConfig getDiagnosticsClientConfig() {
return diagnosticsClientConfig;
}
// Records the outcome of a gateway-mode request: advances the request end time,
// notes the contacted region/endpoint, closes out the retry context, and rebuilds
// this.gatewayStatistics from the response diagnostics.
public void recordGatewayResponse(
RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponseDiagnostics storeResponseDiagnostics, GlobalEndpointManager globalEndpointManager) {
Instant responseTime = Instant.now();
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
URI locationEndPoint = null;
// locationEndPoint stays null unless both the request and its context exist,
// so the dereferences in the block below are guarded by this null check.
if (rxDocumentServiceRequest != null && rxDocumentServiceRequest.requestContext != null) {
locationEndPoint = rxDocumentServiceRequest.requestContext.locationEndpointToRoute;
}
this.recordRetryContextEndTime();
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, rxDocumentServiceRequest.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
// A fresh GatewayStatistics replaces any previously recorded one.
this.gatewayStatistics = new GatewayStatistics();
if (rxDocumentServiceRequest != null) {
this.gatewayStatistics.operationType = rxDocumentServiceRequest.getOperationType();
this.gatewayStatistics.resourceType = rxDocumentServiceRequest.getResourceType();
}
this.gatewayStatistics.statusCode = storeResponseDiagnostics.getStatusCode();
this.gatewayStatistics.subStatusCode = storeResponseDiagnostics.getSubStatusCode();
this.gatewayStatistics.sessionToken = storeResponseDiagnostics.getSessionTokenAsString();
this.gatewayStatistics.requestCharge = storeResponseDiagnostics.getRequestCharge();
this.gatewayStatistics.requestTimeline = storeResponseDiagnostics.getRequestTimeline();
this.gatewayStatistics.partitionKeyRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
this.gatewayStatistics.exceptionMessage = storeResponseDiagnostics.getExceptionMessage();
this.gatewayStatistics.exceptionResponseHeaders = storeResponseDiagnostics.getExceptionResponseHeaders();
this.activityId = storeResponseDiagnostics.getActivityId();
}
}
/**
 * Registers the start of an address-resolution attempt and returns the unique
 * identifier that must later be passed to {@code recordAddressResolutionEnd}.
 */
public String recordAddressResolutionStart(
    URI targetEndpoint,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {
    String identifier = Utils.randomUUID().toString();
    AddressResolutionStatistics stats = new AddressResolutionStatistics();
    stats.startTimeUTC = Instant.now();
    stats.endTimeUTC = null;
    stats.forceRefresh = forceRefresh;
    stats.forceCollectionRoutingMapRefresh = forceCollectionRoutingMapRefresh;
    stats.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString();
    synchronized (this) {
        this.addressResolutionStatistics.put(identifier, stats);
    }
    return identifier;
}
/**
 * Marks the address-resolution attempt identified by {@code identifier} as finished,
 * recording the end time and any exception message.
 *
 * @param identifier the id returned by {@code recordAddressResolutionStart}; no-op when empty.
 * @param exceptionMessage the failure message, or {@code null} on success.
 * @throws IllegalArgumentException if the identifier was never registered via start.
 */
public void recordAddressResolutionEnd(String identifier, String exceptionMessage) {
    if (StringUtils.isEmpty(identifier)) {
        return;
    }
    Instant responseTime = Instant.now();
    synchronized (this) {
        // Single map lookup instead of the previous containsKey + get pair.
        AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier);
        if (resolutionStatistics == null) {
            throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start "
                + "before calling end");
        }
        if (responseTime.isAfter(this.requestEndTimeUTC)) {
            this.requestEndTimeUTC = responseTime;
        }
        resolutionStatistics.endTimeUTC = responseTime;
        resolutionStatistics.exceptionMessage = exceptionMessage;
        resolutionStatistics.inflightRequest = false;
    }
}
// Accessors. Getters expose the internal (synchronized) collections directly, so
// callers share mutable state with this object.
public List<URI> getContactedReplicas() {
return contactedReplicas;
}
// Setters re-wrap the supplied collection in a synchronized view.
public void setContactedReplicas(List<URI> contactedReplicas) {
this.contactedReplicas = Collections.synchronizedList(contactedReplicas);
}
public Set<URI> getFailedReplicas() {
return failedReplicas;
}
public void setFailedReplicas(Set<URI> failedReplicas) {
this.failedReplicas = Collections.synchronizedSet(failedReplicas);
}
public Set<String> getContactedRegionNames() {
return regionsContacted;
}
public void setRegionsContacted(Set<String> regionsContacted) {
this.regionsContacted = Collections.synchronizedSet(regionsContacted);
}
public Set<URI> getLocationEndpointsContacted() {
return locationEndpointsContacted;
}
// NOTE(review): unlike the other setters, this one stores the set as-is without a
// Collections.synchronizedSet wrapper — confirm whether that is intentional.
public void setLocationEndpointsContacted(Set<URI> locationEndpointsContacted) {
this.locationEndpointsContacted = locationEndpointsContacted;
}
public MetadataDiagnosticsContext getMetadataDiagnosticsContext(){
return this.metadataDiagnosticsContext;
}
public SerializationDiagnosticsContext getSerializationDiagnosticsContext() {
return this.serializationDiagnosticsContext;
}
// Stamps the retry context with the current time; delegated to RetryContext.
public void recordRetryContextEndTime() {
this.retryContext.updateEndTime();
}
public RetryContext getRetryContext() {
return retryContext;
}
public List<StoreResponseStatistics> getResponseStatisticsList() {
return responseStatisticsList;
}
public List<StoreResponseStatistics> getSupplementalResponseStatisticsList() {
return supplementalResponseStatisticsList;
}
public Map<String, AddressResolutionStatistics> getAddressResolutionStatistics() {
return addressResolutionStatistics;
}
public GatewayStatistics getGatewayStatistics() {
return gatewayStatistics;
}
public String getRequestSessionToken() { return requestSessionToken; }
// Immutable-after-population snapshot of one store (replica) response: the result
// details plus the request's operation/resource type, response time and session
// token. Fields are serialized via the Jackson annotations below; they appear to be
// populated by the enclosing class — no setters are exposed.
public static class StoreResponseStatistics {
@JsonSerialize(using = StoreResultDiagnostics.StoreResultDiagnosticsSerializer.class)
private StoreResultDiagnostics storeResult;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant requestResponseTimeUTC;
@JsonSerialize
private ResourceType requestResourceType;
@JsonSerialize
private OperationType requestOperationType;
@JsonSerialize
private String requestSessionToken;
public StoreResultDiagnostics getStoreResult() {
return storeResult;
}
public Instant getRequestResponseTimeUTC() {
return requestResponseTimeUTC;
}
public ResourceType getRequestResourceType() {
return requestResourceType;
}
public OperationType getRequestOperationType() {
return requestOperationType;
}
public String getRequestSessionToken() { return requestSessionToken; }
}
// Snapshot of process/host resource usage reported alongside diagnostics.
// No setters are visible here; fields are presumably populated by the enclosing
// diagnostics machinery — confirm against the caller.
public static class SystemInformation {
private String usedMemory;
private String availableMemory;
private String systemCpuLoad;
private int availableProcessors;
public String getUsedMemory() {
return usedMemory;
}
public String getAvailableMemory() {
return availableMemory;
}
public String getSystemCpuLoad() {
return systemCpuLoad;
}
public int getAvailableProcessors() {
return availableProcessors;
}
}
/**
 * Jackson serializer that flattens {@link ClientSideRequestStatistics} into the
 * diagnostics JSON payload: latency, per-store responses, address resolution,
 * retry/metadata/serialization contexts, gateway statistics and a host snapshot.
 */
public static class ClientSideRequestStatisticsSerializer extends StdSerializer<ClientSideRequestStatistics> {
    private static final long serialVersionUID = -2746532297176812860L;
    ClientSideRequestStatisticsSerializer() {
        super(ClientSideRequestStatistics.class);
    }
    @Override
    public void serialize(
        ClientSideRequestStatistics statistics, JsonGenerator generator, SerializerProvider provider) throws
        IOException {
        generator.writeStartObject();
        long requestLatency = statistics
            .getDuration()
            .toMillis();
        generator.writeStringField("userAgent", Utils.getUserAgent());
        generator.writeStringField("activityId", statistics.activityId);
        generator.writeNumberField("requestLatencyInMs", requestLatency);
        generator.writeStringField("requestStartTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestStartTimeUTC));
        generator.writeStringField("requestEndTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestEndTimeUTC));
        generator.writeObjectField("responseStatisticsList", statistics.responseStatisticsList);
        generator.writeObjectField("supplementalResponseStatisticsList", getCappedSupplementalResponseStatisticsList(statistics.supplementalResponseStatisticsList));
        generator.writeObjectField("addressResolutionStatistics", statistics.addressResolutionStatistics);
        generator.writeObjectField("regionsContacted", statistics.regionsContacted);
        generator.writeObjectField("retryContext", statistics.retryContext);
        generator.writeObjectField("metadataDiagnosticsContext", statistics.getMetadataDiagnosticsContext());
        generator.writeObjectField("serializationDiagnosticsContext", statistics.getSerializationDiagnosticsContext());
        generator.writeObjectField("gatewayStatistics", statistics.gatewayStatistics);
        // Fetch the host snapshot outside the generator write so that only snapshot
        // failures are swallowed (system info is best-effort). The previous catch-all
        // around writeObjectField also swallowed JsonGenerator IOExceptions, which could
        // silently leave a truncated diagnostics payload.
        SystemInformation systemInformation = null;
        try {
            systemInformation = fetchSystemInformation();
        } catch (Exception ignored) {
            // best-effort: diagnostics must not fail because host metrics are unavailable
        }
        if (systemInformation != null) {
            generator.writeObjectField("systemInformation", systemInformation);
        }
        generator.writeObjectField("clientCfgs", statistics.diagnosticsClientConfig);
        generator.writeEndObject();
    }
}
/**
 * Caps the supplemental (HEAD/HEAD_FEED) statistics to the most recent
 * MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING entries for serialization.
 * Returns the input list itself when it is already within the cap, otherwise a
 * sub-list view over the trailing entries.
 */
public static List<StoreResponseStatistics> getCappedSupplementalResponseStatisticsList(List<StoreResponseStatistics> supplementalResponseStatisticsList) {
    int size = supplementalResponseStatisticsList.size();
    int start = Math.max(size - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0);
    if (start == 0) {
        return supplementalResponseStatisticsList;
    }
    return supplementalResponseStatisticsList.subList(start, size);
}
/**
 * Timing/outcome record for one address-resolution attempt; created by
 * recordAddressResolutionStart and completed by recordAddressResolutionEnd.
 */
public static class AddressResolutionStatistics {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant endTimeUTC;
@JsonSerialize
private String targetEndpoint;
@JsonSerialize
private String exceptionMessage;
@JsonSerialize
private boolean forceRefresh;
@JsonSerialize
private boolean forceCollectionRoutingMapRefresh;
// True until recordAddressResolutionEnd marks the attempt complete.
@JsonSerialize
private boolean inflightRequest = true;
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
public String getTargetEndpoint() {
return targetEndpoint;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public boolean isInflightRequest() {
return inflightRequest;
}
public boolean isForceRefresh() {
return forceRefresh;
}
public boolean isForceCollectionRoutingMapRefresh() {
return forceCollectionRoutingMapRefresh;
}
}
/**
 * Snapshot of a single gateway response: status/sub-status codes, session token,
 * request charge, request timeline and any exception details.
 * Populated by recordGatewayResponse.
 */
public static class GatewayStatistics {
private String sessionToken;
private OperationType operationType;
private ResourceType resourceType;
private int statusCode;
private int subStatusCode;
private double requestCharge;
private RequestTimeline requestTimeline;
private String partitionKeyRangeId;
private String exceptionMessage;
private String exceptionResponseHeaders;
public String getSessionToken() {
return sessionToken;
}
public OperationType getOperationType() {
return operationType;
}
public int getStatusCode() {
return statusCode;
}
public int getSubStatusCode() {
return subStatusCode;
}
public double getRequestCharge() {
return requestCharge;
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
public ResourceType getResourceType() {
return resourceType;
}
public String getPartitionKeyRangeId() {
return partitionKeyRangeId;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public String getExceptionResponseHeaders() {
return exceptionResponseHeaders;
}
}
/**
 * Captures a best-effort snapshot of JVM heap usage (reported in KB), available
 * processors, and the CPU load string reported by CpuMemoryMonitor.
 */
public static SystemInformation fetchSystemInformation() {
    SystemInformation info = new SystemInformation();
    Runtime runtime = Runtime.getRuntime();
    long totalKb = runtime.totalMemory() / 1024;
    long freeKb = runtime.freeMemory() / 1024;
    long maxKb = runtime.maxMemory() / 1024;
    long usedKb = totalKb - freeKb;
    info.usedMemory = usedKb + " KB";
    info.availableMemory = (maxKb - usedKb) + " KB";
    info.availableProcessors = runtime.availableProcessors();
    info.systemCpuLoad = CpuMemoryMonitor.getCpuLoad().toString();
    return info;
}
} | class ClientSideRequestStatistics {
// Only the most recent N supplemental (HEAD/HEAD_FEED) entries are emitted when serializing.
private static final int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10;
// Immutable client configuration snapshot, shared with clones.
private final DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig;
// Activity id captured from the most recently recorded response.
private String activityId;
private List<StoreResponseStatistics> responseStatisticsList;
private List<StoreResponseStatistics> supplementalResponseStatisticsList;
// Keyed by the identifier returned from recordAddressResolutionStart.
private Map<String, AddressResolutionStatistics> addressResolutionStatistics;
private List<URI> contactedReplicas;
private Set<URI> failedReplicas;
private Instant requestStartTimeUTC;
// Advanced monotonically as responses arrive; getDuration() = end - start.
private Instant requestEndTimeUTC;
private Set<String> regionsContacted;
private Set<URI> locationEndpointsContacted;
private RetryContext retryContext;
private GatewayStatistics gatewayStatistics;
private MetadataDiagnosticsContext metadataDiagnosticsContext;
private SerializationDiagnosticsContext serializationDiagnosticsContext;
/**
 * Creates fresh statistics for a new request. Start and end times are initialized to
 * "now" and all collections start empty (synchronized where mutated concurrently).
 *
 * @param diagnosticsClientContext supplies the immutable client config snapshot
 */
public ClientSideRequestStatistics(DiagnosticsClientContext diagnosticsClientContext) {
this.diagnosticsClientConfig = diagnosticsClientContext.getConfig();
this.requestStartTimeUTC = Instant.now();
this.requestEndTimeUTC = Instant.now();
this.responseStatisticsList = new ArrayList<>();
this.supplementalResponseStatisticsList = new ArrayList<>();
this.addressResolutionStatistics = new HashMap<>();
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>());
this.failedReplicas = Collections.synchronizedSet(new HashSet<>());
this.regionsContacted = Collections.synchronizedSet(new HashSet<>());
this.locationEndpointsContacted = Collections.synchronizedSet(new HashSet<>());
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext();
this.serializationDiagnosticsContext = new SerializationDiagnosticsContext();
this.retryContext = new RetryContext();
}
/**
 * Copy constructor: copies the collections and contexts into new containers so the
 * clone can evolve independently (the element objects themselves are shared).
 *
 * @param toBeCloned statistics instance to copy
 */
public ClientSideRequestStatistics(ClientSideRequestStatistics toBeCloned) {
this.diagnosticsClientConfig = toBeCloned.diagnosticsClientConfig;
this.requestStartTimeUTC = toBeCloned.requestStartTimeUTC;
this.requestEndTimeUTC = toBeCloned.requestEndTimeUTC;
this.responseStatisticsList = new ArrayList<>(toBeCloned.responseStatisticsList);
this.supplementalResponseStatisticsList = new ArrayList<>(toBeCloned.supplementalResponseStatisticsList);
this.addressResolutionStatistics = new HashMap<>(toBeCloned.addressResolutionStatistics);
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>(toBeCloned.contactedReplicas));
this.failedReplicas = Collections.synchronizedSet(new HashSet<>(toBeCloned.failedReplicas));
this.regionsContacted = Collections.synchronizedSet(new HashSet<>(toBeCloned.regionsContacted));
this.locationEndpointsContacted = Collections.synchronizedSet(
new HashSet<>(toBeCloned.locationEndpointsContacted));
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext(toBeCloned.metadataDiagnosticsContext);
this.serializationDiagnosticsContext =
new SerializationDiagnosticsContext(toBeCloned.serializationDiagnosticsContext);
this.retryContext = new RetryContext(toBeCloned.retryContext);
}
// Elapsed wall-clock time between request start and the latest recorded end time.
public Duration getDuration() {
return Duration.between(requestStartTimeUTC, requestEndTimeUTC);
}
public Instant getRequestStartTimeUTC() {
return requestStartTimeUTC;
}
// Immutable client configuration captured at construction time.
public DiagnosticsClientContext.DiagnosticsClientConfig getDiagnosticsClientConfig() {
return diagnosticsClientConfig;
}
/**
 * Records a gateway-mode response: advances the request end time, notes the contacted
 * region/endpoint, stamps the retry-context end time and captures gateway statistics
 * (status, session token, charge, timeline, exception details) from the diagnostics.
 *
 * @param rxDocumentServiceRequest the request that was served (may be null)
 * @param storeResponseDiagnostics diagnostics extracted from the gateway response
 * @param globalEndpointManager used to map the routed endpoint to a region name
 */
public void recordGatewayResponse(
RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponseDiagnostics storeResponseDiagnostics, GlobalEndpointManager globalEndpointManager) {
Instant responseTime = Instant.now();
synchronized (this) {
// requestEndTimeUTC only ever moves forward.
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
URI locationEndPoint = null;
if (rxDocumentServiceRequest != null && rxDocumentServiceRequest.requestContext != null) {
locationEndPoint = rxDocumentServiceRequest.requestContext.locationEndpointToRoute;
}
this.recordRetryContextEndTime();
// locationEndPoint != null implies rxDocumentServiceRequest != null here.
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, rxDocumentServiceRequest.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
this.gatewayStatistics = new GatewayStatistics();
if (rxDocumentServiceRequest != null) {
this.gatewayStatistics.operationType = rxDocumentServiceRequest.getOperationType();
this.gatewayStatistics.resourceType = rxDocumentServiceRequest.getResourceType();
}
this.gatewayStatistics.statusCode = storeResponseDiagnostics.getStatusCode();
this.gatewayStatistics.subStatusCode = storeResponseDiagnostics.getSubStatusCode();
this.gatewayStatistics.sessionToken = storeResponseDiagnostics.getSessionTokenAsString();
this.gatewayStatistics.requestCharge = storeResponseDiagnostics.getRequestCharge();
this.gatewayStatistics.requestTimeline = storeResponseDiagnostics.getRequestTimeline();
this.gatewayStatistics.partitionKeyRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
this.gatewayStatistics.exceptionMessage = storeResponseDiagnostics.getExceptionMessage();
this.gatewayStatistics.exceptionResponseHeaders = storeResponseDiagnostics.getExceptionResponseHeaders();
this.activityId = storeResponseDiagnostics.getActivityId();
}
}
/**
 * Records the start of an address-resolution attempt and returns the identifier to
 * pass to recordAddressResolutionEnd once the attempt completes.
 *
 * @param targetEndpoint endpoint being resolved (recorded as "<NULL>" when null)
 * @param forceRefresh whether the address cache was force-refreshed
 * @param forceCollectionRoutingMapRefresh whether the collection routing map was force-refreshed
 * @return a unique identifier for this resolution attempt
 */
public String recordAddressResolutionStart(
    URI targetEndpoint,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {
    AddressResolutionStatistics stats = new AddressResolutionStatistics();
    stats.startTimeUTC = Instant.now();
    stats.endTimeUTC = null;
    stats.targetEndpoint = (targetEndpoint != null) ? targetEndpoint.toString() : "<NULL>";
    stats.forceRefresh = forceRefresh;
    stats.forceCollectionRoutingMapRefresh = forceCollectionRoutingMapRefresh;
    String identifier = Utils.randomUUID().toString();
    synchronized (this) {
        this.addressResolutionStatistics.put(identifier, stats);
    }
    return identifier;
}
/**
 * Records completion of the address-resolution attempt started via
 * {@link #recordAddressResolutionStart}; also advances the request end time.
 *
 * @param identifier identifier returned by recordAddressResolutionStart; no-op when empty
 * @param exceptionMessage failure message, or null when the resolution succeeded
 * @throws IllegalArgumentException if the identifier was never registered via start
 */
public void recordAddressResolutionEnd(String identifier, String exceptionMessage) {
    if (StringUtils.isEmpty(identifier)) {
        return;
    }
    Instant responseTime = Instant.now();
    synchronized (this) {
        // Single map lookup instead of containsKey + get; values are never null.
        AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier);
        if (resolutionStatistics == null) {
            throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start "
                + "before calling end");
        }
        if (responseTime.isAfter(this.requestEndTimeUTC)) {
            this.requestEndTimeUTC = responseTime;
        }
        resolutionStatistics.endTimeUTC = responseTime;
        resolutionStatistics.exceptionMessage = exceptionMessage;
        resolutionStatistics.inflightRequest = false;
    }
}
// Replicas attempted while serving this request (live synchronized list).
public List<URI> getContactedReplicas() {
    return contactedReplicas;
}
public void setContactedReplicas(List<URI> contactedReplicas) {
    this.contactedReplicas = Collections.synchronizedList(contactedReplicas);
}
// Replicas that failed to serve the request (live synchronized set).
public Set<URI> getFailedReplicas() {
    return failedReplicas;
}
public void setFailedReplicas(Set<URI> failedReplicas) {
    this.failedReplicas = Collections.synchronizedSet(failedReplicas);
}
// Region names contacted for this request (live synchronized set).
public Set<String> getContactedRegionNames() {
    return regionsContacted;
}
public void setRegionsContacted(Set<String> regionsContacted) {
    this.regionsContacted = Collections.synchronizedSet(regionsContacted);
}
// Regional endpoint URIs contacted for this request.
public Set<URI> getLocationEndpointsContacted() {
    return locationEndpointsContacted;
}
/**
 * Consistency fix: wrap in a synchronized view like every other collection setter in
 * this class; the previous direct assignment left only this field unsynchronized.
 */
public void setLocationEndpointsContacted(Set<URI> locationEndpointsContacted) {
    this.locationEndpointsContacted = Collections.synchronizedSet(locationEndpointsContacted);
}
// Diagnostics for metadata (partition key range / address) lookups performed for this request.
public MetadataDiagnosticsContext getMetadataDiagnosticsContext(){
return this.metadataDiagnosticsContext;
}
// Diagnostics for payload serialization/deserialization timings.
public SerializationDiagnosticsContext getSerializationDiagnosticsContext() {
return this.serializationDiagnosticsContext;
}
// Stamps the end time on the retry context; invoked when a (possibly retried) operation completes.
public void recordRetryContextEndTime() {
this.retryContext.updateEndTime();
}
public RetryContext getRetryContext() {
return retryContext;
}
// Per-store-response statistics for regular (non HEAD/HEAD_FEED) operations.
public List<StoreResponseStatistics> getResponseStatisticsList() {
return responseStatisticsList;
}
// HEAD/HEAD_FEED response statistics; capped via getCappedSupplementalResponseStatisticsList when serialized.
public List<StoreResponseStatistics> getSupplementalResponseStatisticsList() {
return supplementalResponseStatisticsList;
}
// Address-resolution attempts keyed by the identifier returned from recordAddressResolutionStart.
public Map<String, AddressResolutionStatistics> getAddressResolutionStatistics() {
return addressResolutionStatistics;
}
// Gateway-mode response snapshot; null until recordGatewayResponse runs.
public GatewayStatistics getGatewayStatistics() {
return gatewayStatistics;
}
/**
 * Snapshot of a single store (replica) response: what was asked (resource/operation/
 * session token), when it completed, and the captured result diagnostics.
 * Serialized as part of the request diagnostics payload.
 */
public static class StoreResponseStatistics {
@JsonSerialize(using = StoreResultDiagnostics.StoreResultDiagnosticsSerializer.class)
private StoreResultDiagnostics storeResult;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant requestResponseTimeUTC;
@JsonSerialize
private ResourceType requestResourceType;
@JsonSerialize
private OperationType requestOperationType;
@JsonSerialize
private String requestSessionToken;
public StoreResultDiagnostics getStoreResult() {
return storeResult;
}
public Instant getRequestResponseTimeUTC() {
return requestResponseTimeUTC;
}
public ResourceType getRequestResourceType() {
return requestResourceType;
}
public OperationType getRequestOperationType() {
return requestOperationType;
}
public String getRequestSessionToken() { return requestSessionToken; }
}
/**
 * Best-effort JVM/host snapshot (heap usage rendered as "<n> KB", CPU load string,
 * available processor count) produced by fetchSystemInformation() for diagnostics.
 */
public static class SystemInformation {
private String usedMemory;
private String availableMemory;
private String systemCpuLoad;
private int availableProcessors;
public String getUsedMemory() {
return usedMemory;
}
public String getAvailableMemory() {
return availableMemory;
}
public String getSystemCpuLoad() {
return systemCpuLoad;
}
public int getAvailableProcessors() {
return availableProcessors;
}
}
/**
 * Jackson serializer that flattens {@link ClientSideRequestStatistics} into the
 * diagnostics JSON payload: latency, per-store responses, address resolution,
 * retry/metadata/serialization contexts, gateway statistics and a host snapshot.
 */
public static class ClientSideRequestStatisticsSerializer extends StdSerializer<ClientSideRequestStatistics> {
    private static final long serialVersionUID = -2746532297176812860L;
    ClientSideRequestStatisticsSerializer() {
        super(ClientSideRequestStatistics.class);
    }
    @Override
    public void serialize(
        ClientSideRequestStatistics statistics, JsonGenerator generator, SerializerProvider provider) throws
        IOException {
        generator.writeStartObject();
        long requestLatency = statistics
            .getDuration()
            .toMillis();
        generator.writeStringField("userAgent", Utils.getUserAgent());
        generator.writeStringField("activityId", statistics.activityId);
        generator.writeNumberField("requestLatencyInMs", requestLatency);
        generator.writeStringField("requestStartTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestStartTimeUTC));
        generator.writeStringField("requestEndTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestEndTimeUTC));
        generator.writeObjectField("responseStatisticsList", statistics.responseStatisticsList);
        generator.writeObjectField("supplementalResponseStatisticsList", getCappedSupplementalResponseStatisticsList(statistics.supplementalResponseStatisticsList));
        generator.writeObjectField("addressResolutionStatistics", statistics.addressResolutionStatistics);
        generator.writeObjectField("regionsContacted", statistics.regionsContacted);
        generator.writeObjectField("retryContext", statistics.retryContext);
        generator.writeObjectField("metadataDiagnosticsContext", statistics.getMetadataDiagnosticsContext());
        generator.writeObjectField("serializationDiagnosticsContext", statistics.getSerializationDiagnosticsContext());
        generator.writeObjectField("gatewayStatistics", statistics.gatewayStatistics);
        // Fetch the host snapshot outside the generator write so that only snapshot
        // failures are swallowed (system info is best-effort). The previous catch-all
        // around writeObjectField also swallowed JsonGenerator IOExceptions, which could
        // silently leave a truncated diagnostics payload.
        SystemInformation systemInformation = null;
        try {
            systemInformation = fetchSystemInformation();
        } catch (Exception ignored) {
            // best-effort: diagnostics must not fail because host metrics are unavailable
        }
        if (systemInformation != null) {
            generator.writeObjectField("systemInformation", systemInformation);
        }
        generator.writeObjectField("clientCfgs", statistics.diagnosticsClientConfig);
        generator.writeEndObject();
    }
}
/**
 * Caps the supplemental (HEAD/HEAD_FEED) statistics to the most recent
 * MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING entries for serialization.
 * Returns the input list itself when it is already within the cap, otherwise a
 * sub-list view over the trailing entries.
 */
public static List<StoreResponseStatistics> getCappedSupplementalResponseStatisticsList(List<StoreResponseStatistics> supplementalResponseStatisticsList) {
    int size = supplementalResponseStatisticsList.size();
    int start = Math.max(size - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0);
    if (start == 0) {
        return supplementalResponseStatisticsList;
    }
    return supplementalResponseStatisticsList.subList(start, size);
}
/**
 * Timing/outcome record for one address-resolution attempt; created by
 * recordAddressResolutionStart and completed by recordAddressResolutionEnd.
 */
public static class AddressResolutionStatistics {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant endTimeUTC;
@JsonSerialize
private String targetEndpoint;
@JsonSerialize
private String exceptionMessage;
@JsonSerialize
private boolean forceRefresh;
@JsonSerialize
private boolean forceCollectionRoutingMapRefresh;
// True until recordAddressResolutionEnd marks the attempt complete.
@JsonSerialize
private boolean inflightRequest = true;
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
public String getTargetEndpoint() {
return targetEndpoint;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public boolean isInflightRequest() {
return inflightRequest;
}
public boolean isForceRefresh() {
return forceRefresh;
}
public boolean isForceCollectionRoutingMapRefresh() {
return forceCollectionRoutingMapRefresh;
}
}
/**
 * Snapshot of a single gateway response: status/sub-status codes, session token,
 * request charge, request timeline and any exception details.
 * Populated by recordGatewayResponse.
 */
public static class GatewayStatistics {
private String sessionToken;
private OperationType operationType;
private ResourceType resourceType;
private int statusCode;
private int subStatusCode;
private double requestCharge;
private RequestTimeline requestTimeline;
private String partitionKeyRangeId;
private String exceptionMessage;
private String exceptionResponseHeaders;
public String getSessionToken() {
return sessionToken;
}
public OperationType getOperationType() {
return operationType;
}
public int getStatusCode() {
return statusCode;
}
public int getSubStatusCode() {
return subStatusCode;
}
public double getRequestCharge() {
return requestCharge;
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
public ResourceType getResourceType() {
return resourceType;
}
public String getPartitionKeyRangeId() {
return partitionKeyRangeId;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public String getExceptionResponseHeaders() {
return exceptionResponseHeaders;
}
}
/**
 * Captures a best-effort snapshot of JVM heap usage (reported in KB), available
 * processors, and the CPU load string reported by CpuMemoryMonitor.
 */
public static SystemInformation fetchSystemInformation() {
    SystemInformation info = new SystemInformation();
    Runtime runtime = Runtime.getRuntime();
    long totalKb = runtime.totalMemory() / 1024;
    long freeKb = runtime.freeMemory() / 1024;
    long maxKb = runtime.maxMemory() / 1024;
    long usedKb = totalKb - freeKb;
    info.usedMemory = usedKb + " KB";
    info.availableMemory = (maxKb - usedKb) + " KB";
    info.availableProcessors = runtime.availableProcessors();
    info.systemCpuLoad = CpuMemoryMonitor.getCpuLoad().toString();
    return info;
}
} |
consider break the line for readability(can start trying with 120 characters/line) | public void recordResponse(RxDocumentServiceRequest request, StoreResultDiagnostics storeResultDiagnostics, GlobalEndpointManager globalEndpointManager) {
Objects.requireNonNull(request, "request is required and cannot be null.");
Instant responseTime = Instant.now();
StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics();
storeResponseStatistics.requestResponseTimeUTC = responseTime;
storeResponseStatistics.storeResult = storeResultDiagnostics;
storeResponseStatistics.requestOperationType = request.getOperationType();
storeResponseStatistics.requestResourceType = request.getResourceType();
if (request.requestContext.sessionToken == null && request.getOriginalSessionToken() == null) {
storeResponseStatistics.requestSessionToken = null;
} else {
if (request.getOriginalSessionToken() != null) {
storeResponseStatistics.requestSessionToken = request.getOriginalSessionToken();
} else {
storeResponseStatistics.requestSessionToken = SessionTokenHelper.concatPartitionKeyRangeIdWithSessionToken(request.requestContext.resolvedPartitionKeyRange.getId(), request.requestContext.sessionToken.convertToString());
}
}
activityId = request.getActivityId().toString();
URI locationEndPoint = null;
if (request.requestContext != null) {
if (request.requestContext.locationEndpointToRoute != null) {
locationEndPoint = request.requestContext.locationEndpointToRoute;
}
}
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, request.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
if (storeResponseStatistics.requestOperationType == OperationType.Head
|| storeResponseStatistics.requestOperationType == OperationType.HeadFeed) {
this.supplementalResponseStatisticsList.add(storeResponseStatistics);
} else {
this.responseStatisticsList.add(storeResponseStatistics);
}
}
} | storeResponseStatistics.requestSessionToken = SessionTokenHelper.concatPartitionKeyRangeIdWithSessionToken(request.requestContext.resolvedPartitionKeyRange.getId(), request.requestContext.sessionToken.convertToString()); | public void recordResponse(RxDocumentServiceRequest request, StoreResultDiagnostics storeResultDiagnostics, GlobalEndpointManager globalEndpointManager) {
Objects.requireNonNull(request, "request is required and cannot be null.");
Instant responseTime = Instant.now();
StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics();
storeResponseStatistics.requestResponseTimeUTC = responseTime;
storeResponseStatistics.storeResult = storeResultDiagnostics;
storeResponseStatistics.requestOperationType = request.getOperationType();
storeResponseStatistics.requestResourceType = request.getResourceType();
storeResponseStatistics.requestSessionToken = request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);
activityId = request.getActivityId().toString();
URI locationEndPoint = null;
if (request.requestContext != null) {
if (request.requestContext.locationEndpointToRoute != null) {
locationEndPoint = request.requestContext.locationEndpointToRoute;
}
}
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, request.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
if (storeResponseStatistics.requestOperationType == OperationType.Head
|| storeResponseStatistics.requestOperationType == OperationType.HeadFeed) {
this.supplementalResponseStatisticsList.add(storeResponseStatistics);
} else {
this.responseStatisticsList.add(storeResponseStatistics);
}
}
} | class ClientSideRequestStatistics {
// Only the most recent N supplemental (HEAD/HEAD_FEED) entries are emitted when serializing.
private static final int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10;
// Immutable client configuration snapshot, shared with clones.
private final DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig;
// Activity id captured from the most recently recorded response.
private String activityId;
private List<StoreResponseStatistics> responseStatisticsList;
private List<StoreResponseStatistics> supplementalResponseStatisticsList;
// Keyed by the identifier returned from recordAddressResolutionStart.
private Map<String, AddressResolutionStatistics> addressResolutionStatistics;
// Session token sent with the request, if any (may be null).
private String requestSessionToken;
private List<URI> contactedReplicas;
private Set<URI> failedReplicas;
private Instant requestStartTimeUTC;
// Advanced monotonically as responses arrive; getDuration() = end - start.
private Instant requestEndTimeUTC;
private Set<String> regionsContacted;
private Set<URI> locationEndpointsContacted;
private RetryContext retryContext;
private GatewayStatistics gatewayStatistics;
private MetadataDiagnosticsContext metadataDiagnosticsContext;
private SerializationDiagnosticsContext serializationDiagnosticsContext;
/**
 * Creates fresh statistics for a new request. Start and end times are initialized to
 * "now" and all collections start empty (synchronized where mutated concurrently).
 *
 * @param diagnosticsClientContext supplies the immutable client config snapshot
 */
public ClientSideRequestStatistics(DiagnosticsClientContext diagnosticsClientContext) {
this.diagnosticsClientConfig = diagnosticsClientContext.getConfig();
this.requestStartTimeUTC = Instant.now();
this.requestEndTimeUTC = Instant.now();
this.responseStatisticsList = new ArrayList<>();
this.supplementalResponseStatisticsList = new ArrayList<>();
this.addressResolutionStatistics = new HashMap<>();
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>());
this.failedReplicas = Collections.synchronizedSet(new HashSet<>());
this.regionsContacted = Collections.synchronizedSet(new HashSet<>());
this.locationEndpointsContacted = Collections.synchronizedSet(new HashSet<>());
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext();
this.serializationDiagnosticsContext = new SerializationDiagnosticsContext();
this.retryContext = new RetryContext();
}
/**
 * Copy constructor: copies the collections and contexts into new containers so the
 * clone can evolve independently (the element objects themselves are shared); also
 * carries over the request session token.
 *
 * @param toBeCloned statistics instance to copy
 */
public ClientSideRequestStatistics(ClientSideRequestStatistics toBeCloned) {
this.diagnosticsClientConfig = toBeCloned.diagnosticsClientConfig;
this.requestStartTimeUTC = toBeCloned.requestStartTimeUTC;
this.requestEndTimeUTC = toBeCloned.requestEndTimeUTC;
this.responseStatisticsList = new ArrayList<>(toBeCloned.responseStatisticsList);
this.supplementalResponseStatisticsList = new ArrayList<>(toBeCloned.supplementalResponseStatisticsList);
this.addressResolutionStatistics = new HashMap<>(toBeCloned.addressResolutionStatistics);
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>(toBeCloned.contactedReplicas));
this.failedReplicas = Collections.synchronizedSet(new HashSet<>(toBeCloned.failedReplicas));
this.regionsContacted = Collections.synchronizedSet(new HashSet<>(toBeCloned.regionsContacted));
this.locationEndpointsContacted = Collections.synchronizedSet(
new HashSet<>(toBeCloned.locationEndpointsContacted));
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext(toBeCloned.metadataDiagnosticsContext);
this.serializationDiagnosticsContext =
new SerializationDiagnosticsContext(toBeCloned.serializationDiagnosticsContext);
this.retryContext = new RetryContext(toBeCloned.retryContext);
this.requestSessionToken = toBeCloned.requestSessionToken;
}
// Elapsed wall-clock time between request start and the latest recorded end time.
public Duration getDuration() {
return Duration.between(requestStartTimeUTC, requestEndTimeUTC);
}
public Instant getRequestStartTimeUTC() {
return requestStartTimeUTC;
}
// Immutable client configuration captured at construction time.
public DiagnosticsClientContext.DiagnosticsClientConfig getDiagnosticsClientConfig() {
return diagnosticsClientConfig;
}
/**
 * Records a gateway-mode response: advances the request end time, notes the contacted
 * region/endpoint, stamps the retry-context end time and captures gateway statistics
 * (status, session token, charge, timeline, exception details) from the diagnostics.
 *
 * @param rxDocumentServiceRequest the request that was served (may be null)
 * @param storeResponseDiagnostics diagnostics extracted from the gateway response
 * @param globalEndpointManager used to map the routed endpoint to a region name
 */
public void recordGatewayResponse(
RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponseDiagnostics storeResponseDiagnostics, GlobalEndpointManager globalEndpointManager) {
Instant responseTime = Instant.now();
synchronized (this) {
// requestEndTimeUTC only ever moves forward.
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
URI locationEndPoint = null;
if (rxDocumentServiceRequest != null && rxDocumentServiceRequest.requestContext != null) {
locationEndPoint = rxDocumentServiceRequest.requestContext.locationEndpointToRoute;
}
this.recordRetryContextEndTime();
// locationEndPoint != null implies rxDocumentServiceRequest != null here.
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, rxDocumentServiceRequest.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
this.gatewayStatistics = new GatewayStatistics();
if (rxDocumentServiceRequest != null) {
this.gatewayStatistics.operationType = rxDocumentServiceRequest.getOperationType();
this.gatewayStatistics.resourceType = rxDocumentServiceRequest.getResourceType();
}
this.gatewayStatistics.statusCode = storeResponseDiagnostics.getStatusCode();
this.gatewayStatistics.subStatusCode = storeResponseDiagnostics.getSubStatusCode();
this.gatewayStatistics.sessionToken = storeResponseDiagnostics.getSessionTokenAsString();
this.gatewayStatistics.requestCharge = storeResponseDiagnostics.getRequestCharge();
this.gatewayStatistics.requestTimeline = storeResponseDiagnostics.getRequestTimeline();
this.gatewayStatistics.partitionKeyRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
this.gatewayStatistics.exceptionMessage = storeResponseDiagnostics.getExceptionMessage();
this.gatewayStatistics.exceptionResponseHeaders = storeResponseDiagnostics.getExceptionResponseHeaders();
this.activityId = storeResponseDiagnostics.getActivityId();
}
}
/**
 * Records the start of an address-resolution attempt and returns the identifier to
 * pass to recordAddressResolutionEnd once the attempt completes.
 *
 * @param targetEndpoint endpoint being resolved (recorded as "<NULL>" when null)
 * @param forceRefresh whether the address cache was force-refreshed
 * @param forceCollectionRoutingMapRefresh whether the collection routing map was force-refreshed
 * @return a unique identifier for this resolution attempt
 */
public String recordAddressResolutionStart(
    URI targetEndpoint,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {
    AddressResolutionStatistics stats = new AddressResolutionStatistics();
    stats.startTimeUTC = Instant.now();
    stats.endTimeUTC = null;
    stats.targetEndpoint = (targetEndpoint != null) ? targetEndpoint.toString() : "<NULL>";
    stats.forceRefresh = forceRefresh;
    stats.forceCollectionRoutingMapRefresh = forceCollectionRoutingMapRefresh;
    String identifier = Utils.randomUUID().toString();
    synchronized (this) {
        this.addressResolutionStatistics.put(identifier, stats);
    }
    return identifier;
}
/**
 * Records completion of the address-resolution attempt started via
 * {@link #recordAddressResolutionStart}; also advances the request end time.
 *
 * @param identifier identifier returned by recordAddressResolutionStart; no-op when empty
 * @param exceptionMessage failure message, or null when the resolution succeeded
 * @throws IllegalArgumentException if the identifier was never registered via start
 */
public void recordAddressResolutionEnd(String identifier, String exceptionMessage) {
    if (StringUtils.isEmpty(identifier)) {
        return;
    }
    Instant responseTime = Instant.now();
    synchronized (this) {
        // Single map lookup instead of containsKey + get; values are never null.
        AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier);
        if (resolutionStatistics == null) {
            throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start "
                + "before calling end");
        }
        if (responseTime.isAfter(this.requestEndTimeUTC)) {
            this.requestEndTimeUTC = responseTime;
        }
        resolutionStatistics.endTimeUTC = responseTime;
        resolutionStatistics.exceptionMessage = exceptionMessage;
        resolutionStatistics.inflightRequest = false;
    }
}
// Replicas attempted while serving this request (live synchronized list).
public List<URI> getContactedReplicas() {
    return contactedReplicas;
}
public void setContactedReplicas(List<URI> contactedReplicas) {
    this.contactedReplicas = Collections.synchronizedList(contactedReplicas);
}
// Replicas that failed to serve the request (live synchronized set).
public Set<URI> getFailedReplicas() {
    return failedReplicas;
}
public void setFailedReplicas(Set<URI> failedReplicas) {
    this.failedReplicas = Collections.synchronizedSet(failedReplicas);
}
// Region names contacted for this request (live synchronized set).
public Set<String> getContactedRegionNames() {
    return regionsContacted;
}
public void setRegionsContacted(Set<String> regionsContacted) {
    this.regionsContacted = Collections.synchronizedSet(regionsContacted);
}
// Regional endpoint URIs contacted for this request.
public Set<URI> getLocationEndpointsContacted() {
    return locationEndpointsContacted;
}
/**
 * Consistency fix: wrap in a synchronized view like every other collection setter in
 * this class; the previous direct assignment left only this field unsynchronized.
 */
public void setLocationEndpointsContacted(Set<URI> locationEndpointsContacted) {
    this.locationEndpointsContacted = Collections.synchronizedSet(locationEndpointsContacted);
}
// Diagnostics for metadata (partition key range / address) lookups performed for this request.
public MetadataDiagnosticsContext getMetadataDiagnosticsContext(){
return this.metadataDiagnosticsContext;
}
// Diagnostics for payload serialization/deserialization timings.
public SerializationDiagnosticsContext getSerializationDiagnosticsContext() {
return this.serializationDiagnosticsContext;
}
// Stamps the end time on the retry context; invoked when a (possibly retried) operation completes.
public void recordRetryContextEndTime() {
this.retryContext.updateEndTime();
}
public RetryContext getRetryContext() {
return retryContext;
}
// Per-store-response statistics for regular (non HEAD/HEAD_FEED) operations.
public List<StoreResponseStatistics> getResponseStatisticsList() {
return responseStatisticsList;
}
// HEAD/HEAD_FEED response statistics; capped via getCappedSupplementalResponseStatisticsList when serialized.
public List<StoreResponseStatistics> getSupplementalResponseStatisticsList() {
return supplementalResponseStatisticsList;
}
// Address-resolution attempts keyed by the identifier returned from recordAddressResolutionStart.
public Map<String, AddressResolutionStatistics> getAddressResolutionStatistics() {
return addressResolutionStatistics;
}
// Gateway-mode response snapshot; null until recordGatewayResponse runs.
public GatewayStatistics getGatewayStatistics() {
return gatewayStatistics;
}
// Session token attached to the request, if any (may be null).
public String getRequestSessionToken() { return requestSessionToken; }
/**
 * Snapshot of a single store response: result diagnostics plus the request's
 * resource type, operation type, response time and session token.
 * Field names are part of the serialized diagnostics JSON (Jackson annotations).
 */
public static class StoreResponseStatistics {
    @JsonSerialize(using = StoreResultDiagnostics.StoreResultDiagnosticsSerializer.class)
    private StoreResultDiagnostics storeResult;
    @JsonSerialize(using = DiagnosticsInstantSerializer.class)
    private Instant requestResponseTimeUTC;
    @JsonSerialize
    private ResourceType requestResourceType;
    @JsonSerialize
    private OperationType requestOperationType;
    @JsonSerialize
    private String requestSessionToken;

    public StoreResultDiagnostics getStoreResult() {
        return storeResult;
    }

    public Instant getRequestResponseTimeUTC() {
        return requestResponseTimeUTC;
    }

    public ResourceType getRequestResourceType() {
        return requestResourceType;
    }

    public OperationType getRequestOperationType() {
        return requestOperationType;
    }

    public String getRequestSessionToken() { return requestSessionToken; }
}
/**
 * JVM-level snapshot taken at serialization time: used/available memory
 * (formatted as "<n> KB" strings), system CPU load and processor count.
 */
public static class SystemInformation {
    private String usedMemory;
    private String availableMemory;
    private String systemCpuLoad;
    private int availableProcessors;

    public String getUsedMemory() {
        return usedMemory;
    }

    public String getAvailableMemory() {
        return availableMemory;
    }

    public String getSystemCpuLoad() {
        return systemCpuLoad;
    }

    public int getAvailableProcessors() {
        return availableProcessors;
    }
}
/**
 * Jackson serializer that flattens a ClientSideRequestStatistics instance into
 * the diagnostics JSON payload (latency, per-response stats, gateway stats,
 * address resolution, retry context and a best-effort system snapshot).
 */
public static class ClientSideRequestStatisticsSerializer extends StdSerializer<ClientSideRequestStatistics> {
    private static final long serialVersionUID = -2746532297176812860L;

    ClientSideRequestStatisticsSerializer() {
        super(ClientSideRequestStatistics.class);
    }

    @Override
    public void serialize(
        ClientSideRequestStatistics statistics, JsonGenerator generator, SerializerProvider provider) throws
        IOException {
        generator.writeStartObject();
        long requestLatency = statistics
            .getDuration()
            .toMillis();
        generator.writeStringField("userAgent", Utils.getUserAgent());
        generator.writeStringField("activityId", statistics.activityId);
        generator.writeNumberField("requestLatencyInMs", requestLatency);
        generator.writeStringField("requestStartTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestStartTimeUTC));
        generator.writeStringField("requestEndTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestEndTimeUTC));
        generator.writeObjectField("responseStatisticsList", statistics.responseStatisticsList);
        // Head/HeadFeed statistics are capped so the payload stays bounded.
        generator.writeObjectField("supplementalResponseStatisticsList", getCappedSupplementalResponseStatisticsList(statistics.supplementalResponseStatisticsList));
        generator.writeObjectField("addressResolutionStatistics", statistics.addressResolutionStatistics);
        generator.writeObjectField("regionsContacted", statistics.regionsContacted);
        generator.writeObjectField("retryContext", statistics.retryContext);
        generator.writeObjectField("metadataDiagnosticsContext", statistics.getMetadataDiagnosticsContext());
        generator.writeObjectField("serializationDiagnosticsContext", statistics.getSerializationDiagnosticsContext());
        generator.writeObjectField("gatewayStatistics", statistics.gatewayStatistics);
        try {
            SystemInformation systemInformation = fetchSystemInformation();
            generator.writeObjectField("systemInformation", systemInformation);
        } catch (Exception ignored) {
            // Best-effort: system information is auxiliary diagnostics data; a
            // failure to collect it must not fail serialization of the statistics.
        }
        generator.writeObjectField("clientCfgs", statistics.diagnosticsClientConfig);
        generator.writeEndObject();
    }
}
/**
 * Returns at most the last {@code MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING}
 * entries of the supplemental statistics. When no capping is needed the
 * original list is returned unchanged; otherwise a subList view of the tail
 * is returned (not a copy).
 */
public static List<StoreResponseStatistics> getCappedSupplementalResponseStatisticsList(List<StoreResponseStatistics> supplementalResponseStatisticsList) {
    int size = supplementalResponseStatisticsList.size();
    int start = Math.max(size - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0);
    return start == 0
        ? supplementalResponseStatisticsList
        : supplementalResponseStatisticsList.subList(start, size);
}
/**
 * Timing and outcome of one address-resolution call. Created by
 * recordAddressResolutionStart and completed by recordAddressResolutionEnd.
 * Field names are part of the serialized diagnostics JSON.
 */
public static class AddressResolutionStatistics {
    @JsonSerialize(using = DiagnosticsInstantSerializer.class)
    private Instant startTimeUTC;
    @JsonSerialize(using = DiagnosticsInstantSerializer.class)
    private Instant endTimeUTC;
    @JsonSerialize
    private String targetEndpoint;
    @JsonSerialize
    private String exceptionMessage;
    @JsonSerialize
    private boolean forceRefresh;
    @JsonSerialize
    private boolean forceCollectionRoutingMapRefresh;
    // True until recordAddressResolutionEnd flips it; marks in-flight resolutions.
    @JsonSerialize
    private boolean inflightRequest = true;

    public Instant getStartTimeUTC() {
        return startTimeUTC;
    }

    public Instant getEndTimeUTC() {
        return endTimeUTC;
    }

    public String getTargetEndpoint() {
        return targetEndpoint;
    }

    public String getExceptionMessage() {
        return exceptionMessage;
    }

    public boolean isInflightRequest() {
        return inflightRequest;
    }

    public boolean isForceRefresh() {
        return forceRefresh;
    }

    public boolean isForceCollectionRoutingMapRefresh() {
        return forceCollectionRoutingMapRefresh;
    }
}
/**
 * Details of the last gateway call recorded by recordGatewayResponse:
 * status/sub-status codes, request charge, timeline and failure details.
 */
public static class GatewayStatistics {
    private String sessionToken;
    private OperationType operationType;
    private ResourceType resourceType;
    private int statusCode;
    private int subStatusCode;
    private double requestCharge;
    private RequestTimeline requestTimeline;
    private String partitionKeyRangeId;
    private String exceptionMessage;
    private String exceptionResponseHeaders;

    public String getSessionToken() {
        return sessionToken;
    }

    public OperationType getOperationType() {
        return operationType;
    }

    public int getStatusCode() {
        return statusCode;
    }

    public int getSubStatusCode() {
        return subStatusCode;
    }

    public double getRequestCharge() {
        return requestCharge;
    }

    public RequestTimeline getRequestTimeline() {
        return requestTimeline;
    }

    public ResourceType getResourceType() {
        return resourceType;
    }

    public String getPartitionKeyRangeId() {
        return partitionKeyRangeId;
    }

    public String getExceptionMessage() {
        return exceptionMessage;
    }

    public String getExceptionResponseHeaders() {
        return exceptionResponseHeaders;
    }
}
/**
 * Captures a point-in-time JVM snapshot: used/available heap (in KB),
 * available processors and the current system CPU load.
 */
public static SystemInformation fetchSystemInformation() {
    Runtime runtime = Runtime.getRuntime();
    long totalKb = runtime.totalMemory() / 1024;
    long freeKb = runtime.freeMemory() / 1024;
    long maxKb = runtime.maxMemory() / 1024;
    long usedKb = totalKb - freeKb;

    SystemInformation info = new SystemInformation();
    info.usedMemory = usedKb + " KB";
    info.availableMemory = (maxKb - usedKb) + " KB";
    info.availableProcessors = runtime.availableProcessors();
    info.systemCpuLoad = CpuMemoryMonitor.getCpuLoad().toString();
    return info;
}
} | class ClientSideRequestStatistics {
// Cap on Head/HeadFeed statistics retained in the serialized diagnostics output.
private static final int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10;
private final DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig;
private String activityId;
private List<StoreResponseStatistics> responseStatisticsList;
// Head/HeadFeed responses are tracked separately (see recordResponse).
private List<StoreResponseStatistics> supplementalResponseStatisticsList;
// Keyed by the identifier returned from recordAddressResolutionStart.
private Map<String, AddressResolutionStatistics> addressResolutionStatistics;
private List<URI> contactedReplicas;
private Set<URI> failedReplicas;
private Instant requestStartTimeUTC;
private Instant requestEndTimeUTC;
private Set<String> regionsContacted;
private Set<URI> locationEndpointsContacted;
private RetryContext retryContext;
private GatewayStatistics gatewayStatistics;
private MetadataDiagnosticsContext metadataDiagnosticsContext;
private SerializationDiagnosticsContext serializationDiagnosticsContext;

/**
 * Creates empty statistics. Start and end time are both set to "now", so
 * getDuration() is ~zero until an event is recorded.
 */
public ClientSideRequestStatistics(DiagnosticsClientContext diagnosticsClientContext) {
    this.diagnosticsClientConfig = diagnosticsClientContext.getConfig();
    this.requestStartTimeUTC = Instant.now();
    this.requestEndTimeUTC = Instant.now();
    this.responseStatisticsList = new ArrayList<>();
    this.supplementalResponseStatisticsList = new ArrayList<>();
    this.addressResolutionStatistics = new HashMap<>();
    this.contactedReplicas = Collections.synchronizedList(new ArrayList<>());
    this.failedReplicas = Collections.synchronizedSet(new HashSet<>());
    this.regionsContacted = Collections.synchronizedSet(new HashSet<>());
    this.locationEndpointsContacted = Collections.synchronizedSet(new HashSet<>());
    this.metadataDiagnosticsContext = new MetadataDiagnosticsContext();
    this.serializationDiagnosticsContext = new SerializationDiagnosticsContext();
    this.retryContext = new RetryContext();
}

/**
 * Copy constructor: snapshots the collections of {@code toBeCloned} into
 * fresh synchronized wrappers so the clone is independent of the original.
 */
public ClientSideRequestStatistics(ClientSideRequestStatistics toBeCloned) {
    this.diagnosticsClientConfig = toBeCloned.diagnosticsClientConfig;
    this.requestStartTimeUTC = toBeCloned.requestStartTimeUTC;
    this.requestEndTimeUTC = toBeCloned.requestEndTimeUTC;
    this.responseStatisticsList = new ArrayList<>(toBeCloned.responseStatisticsList);
    this.supplementalResponseStatisticsList = new ArrayList<>(toBeCloned.supplementalResponseStatisticsList);
    this.addressResolutionStatistics = new HashMap<>(toBeCloned.addressResolutionStatistics);
    this.contactedReplicas = Collections.synchronizedList(new ArrayList<>(toBeCloned.contactedReplicas));
    this.failedReplicas = Collections.synchronizedSet(new HashSet<>(toBeCloned.failedReplicas));
    this.regionsContacted = Collections.synchronizedSet(new HashSet<>(toBeCloned.regionsContacted));
    this.locationEndpointsContacted = Collections.synchronizedSet(
        new HashSet<>(toBeCloned.locationEndpointsContacted));
    this.metadataDiagnosticsContext = new MetadataDiagnosticsContext(toBeCloned.metadataDiagnosticsContext);
    this.serializationDiagnosticsContext =
        new SerializationDiagnosticsContext(toBeCloned.serializationDiagnosticsContext);
    this.retryContext = new RetryContext(toBeCloned.retryContext);
}

/** @return elapsed time between request start and the latest recorded end time. */
public Duration getDuration() {
    return Duration.between(requestStartTimeUTC, requestEndTimeUTC);
}

public Instant getRequestStartTimeUTC() {
    return requestStartTimeUTC;
}

public DiagnosticsClientContext.DiagnosticsClientConfig getDiagnosticsClientConfig() {
    return diagnosticsClientConfig;
}
/**
 * Records the outcome of a gateway call: advances the request end time, notes
 * the contacted region/endpoint, and captures status, charge, timeline and
 * failure details. The previous gatewayStatistics object is replaced (last
 * call wins). Thread-safe via synchronization on this instance.
 */
public void recordGatewayResponse(
    RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponseDiagnostics storeResponseDiagnostics, GlobalEndpointManager globalEndpointManager) {
    Instant responseTime = Instant.now();
    synchronized (this) {
        // Track the latest completion time seen across all recorded events.
        if (responseTime.isAfter(this.requestEndTimeUTC)) {
            this.requestEndTimeUTC = responseTime;
        }
        URI locationEndPoint = null;
        if (rxDocumentServiceRequest != null && rxDocumentServiceRequest.requestContext != null) {
            locationEndPoint = rxDocumentServiceRequest.requestContext.locationEndpointToRoute;
        }
        this.recordRetryContextEndTime();
        if (locationEndPoint != null) {
            this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, rxDocumentServiceRequest.getOperationType()));
            this.locationEndpointsContacted.add(locationEndPoint);
        }
        this.gatewayStatistics = new GatewayStatistics();
        if (rxDocumentServiceRequest != null) {
            this.gatewayStatistics.operationType = rxDocumentServiceRequest.getOperationType();
            this.gatewayStatistics.resourceType = rxDocumentServiceRequest.getResourceType();
        }
        this.gatewayStatistics.statusCode = storeResponseDiagnostics.getStatusCode();
        this.gatewayStatistics.subStatusCode = storeResponseDiagnostics.getSubStatusCode();
        this.gatewayStatistics.sessionToken = storeResponseDiagnostics.getSessionTokenAsString();
        this.gatewayStatistics.requestCharge = storeResponseDiagnostics.getRequestCharge();
        this.gatewayStatistics.requestTimeline = storeResponseDiagnostics.getRequestTimeline();
        this.gatewayStatistics.partitionKeyRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
        this.gatewayStatistics.exceptionMessage = storeResponseDiagnostics.getExceptionMessage();
        this.gatewayStatistics.exceptionResponseHeaders = storeResponseDiagnostics.getExceptionResponseHeaders();
        this.activityId = storeResponseDiagnostics.getActivityId();
    }
}
/**
 * Registers the start of an address-resolution call and returns the unique
 * identifier that must later be passed to recordAddressResolutionEnd.
 */
public String recordAddressResolutionStart(
    URI targetEndpoint,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {
    AddressResolutionStatistics stats = new AddressResolutionStatistics();
    stats.startTimeUTC = Instant.now();
    stats.endTimeUTC = null;
    stats.targetEndpoint = (targetEndpoint == null) ? "<NULL>" : targetEndpoint.toString();
    stats.forceRefresh = forceRefresh;
    stats.forceCollectionRoutingMapRefresh = forceCollectionRoutingMapRefresh;

    String identifier = Utils.randomUUID().toString();
    synchronized (this) {
        this.addressResolutionStatistics.put(identifier, stats);
    }
    return identifier;
}
/**
 * Completes the address-resolution entry created by {@code recordAddressResolutionStart}.
 * No-op when {@code identifier} is blank. Updates the overall request end time if this
 * completion is the latest event seen so far. Thread-safe via synchronization on this.
 *
 * @param identifier       the key returned by recordAddressResolutionStart
 * @param exceptionMessage failure message, or null on success
 * @throws IllegalArgumentException if the identifier was never registered
 */
public void recordAddressResolutionEnd(String identifier, String exceptionMessage) {
    if (StringUtils.isEmpty(identifier)) {
        return;
    }
    Instant responseTime = Instant.now();
    synchronized (this) {
        // Single map lookup instead of containsKey + get; values are never null
        // because only freshly created statistics objects are ever put.
        AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier);
        if (resolutionStatistics == null) {
            throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start "
                + "before calling end");
        }
        if (responseTime.isAfter(this.requestEndTimeUTC)) {
            this.requestEndTimeUTC = responseTime;
        }
        resolutionStatistics.endTimeUTC = responseTime;
        resolutionStatistics.exceptionMessage = exceptionMessage;
        resolutionStatistics.inflightRequest = false;
    }
}
/** @return replicas contacted during this request (synchronized list). */
public List<URI> getContactedReplicas() {
    return contactedReplicas;
}

/** Replaces the contacted-replica list, wrapping it for thread-safe access. */
public void setContactedReplicas(List<URI> contactedReplicas) {
    this.contactedReplicas = Collections.synchronizedList(contactedReplicas);
}

/** @return replicas that failed during this request (synchronized set). */
public Set<URI> getFailedReplicas() {
    return failedReplicas;
}

/** Replaces the failed-replica set, wrapping it for thread-safe access. */
public void setFailedReplicas(Set<URI> failedReplicas) {
    this.failedReplicas = Collections.synchronizedSet(failedReplicas);
}

/** @return names of the regions contacted while serving this request. */
public Set<String> getContactedRegionNames() {
    return regionsContacted;
}

/** Replaces the contacted-region set, wrapping it for thread-safe access. */
public void setRegionsContacted(Set<String> regionsContacted) {
    this.regionsContacted = Collections.synchronizedSet(regionsContacted);
}

/** @return location endpoints contacted while serving this request. */
public Set<URI> getLocationEndpointsContacted() {
    return locationEndpointsContacted;
}
/**
 * Replaces the contacted-endpoint set. Wrapped in a synchronized set for
 * consistency with the other collection setters (e.g. setRegionsContacted)
 * and with how the field is initialized in the constructor.
 */
public void setLocationEndpointsContacted(Set<URI> locationEndpointsContacted) {
    this.locationEndpointsContacted = Collections.synchronizedSet(locationEndpointsContacted);
}
/** @return diagnostics for metadata calls (e.g. address/collection lookups). */
public MetadataDiagnosticsContext getMetadataDiagnosticsContext(){
    return this.metadataDiagnosticsContext;
}

/** @return diagnostics for payload serialization/deserialization steps. */
public SerializationDiagnosticsContext getSerializationDiagnosticsContext() {
    return this.serializationDiagnosticsContext;
}

/** Marks the current retry context as finished (records its end time). */
public void recordRetryContextEndTime() {
    this.retryContext.updateEndTime();
}

public RetryContext getRetryContext() {
    return retryContext;
}

/** @return statistics for non-Head/HeadFeed store responses. */
public List<StoreResponseStatistics> getResponseStatisticsList() {
    return responseStatisticsList;
}

/** @return statistics for Head/HeadFeed store responses (capped at serialization time). */
public List<StoreResponseStatistics> getSupplementalResponseStatisticsList() {
    return supplementalResponseStatisticsList;
}

/** @return address-resolution statistics keyed by the identifier from recordAddressResolutionStart. */
public Map<String, AddressResolutionStatistics> getAddressResolutionStatistics() {
    return addressResolutionStatistics;
}

/** @return statistics of the last recorded gateway call, or null if none. */
public GatewayStatistics getGatewayStatistics() {
    return gatewayStatistics;
}
/**
 * Snapshot of a single store response: result diagnostics plus the request's
 * resource type, operation type, response time and session token.
 * Field names are part of the serialized diagnostics JSON (Jackson annotations).
 */
public static class StoreResponseStatistics {
    @JsonSerialize(using = StoreResultDiagnostics.StoreResultDiagnosticsSerializer.class)
    private StoreResultDiagnostics storeResult;
    @JsonSerialize(using = DiagnosticsInstantSerializer.class)
    private Instant requestResponseTimeUTC;
    @JsonSerialize
    private ResourceType requestResourceType;
    @JsonSerialize
    private OperationType requestOperationType;
    @JsonSerialize
    private String requestSessionToken;

    public StoreResultDiagnostics getStoreResult() {
        return storeResult;
    }

    public Instant getRequestResponseTimeUTC() {
        return requestResponseTimeUTC;
    }

    public ResourceType getRequestResourceType() {
        return requestResourceType;
    }

    public OperationType getRequestOperationType() {
        return requestOperationType;
    }

    public String getRequestSessionToken() { return requestSessionToken; }
}

/**
 * JVM-level snapshot taken at serialization time: used/available memory
 * (formatted as "<n> KB" strings), system CPU load and processor count.
 */
public static class SystemInformation {
    private String usedMemory;
    private String availableMemory;
    private String systemCpuLoad;
    private int availableProcessors;

    public String getUsedMemory() {
        return usedMemory;
    }

    public String getAvailableMemory() {
        return availableMemory;
    }

    public String getSystemCpuLoad() {
        return systemCpuLoad;
    }

    public int getAvailableProcessors() {
        return availableProcessors;
    }
}
/**
 * Jackson serializer that flattens a ClientSideRequestStatistics instance into
 * the diagnostics JSON payload (latency, per-response stats, gateway stats,
 * address resolution, retry context and a best-effort system snapshot).
 */
public static class ClientSideRequestStatisticsSerializer extends StdSerializer<ClientSideRequestStatistics> {
    private static final long serialVersionUID = -2746532297176812860L;

    ClientSideRequestStatisticsSerializer() {
        super(ClientSideRequestStatistics.class);
    }

    @Override
    public void serialize(
        ClientSideRequestStatistics statistics, JsonGenerator generator, SerializerProvider provider) throws
        IOException {
        generator.writeStartObject();
        long requestLatency = statistics
            .getDuration()
            .toMillis();
        generator.writeStringField("userAgent", Utils.getUserAgent());
        generator.writeStringField("activityId", statistics.activityId);
        generator.writeNumberField("requestLatencyInMs", requestLatency);
        generator.writeStringField("requestStartTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestStartTimeUTC));
        generator.writeStringField("requestEndTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestEndTimeUTC));
        generator.writeObjectField("responseStatisticsList", statistics.responseStatisticsList);
        // Head/HeadFeed statistics are capped so the payload stays bounded.
        generator.writeObjectField("supplementalResponseStatisticsList", getCappedSupplementalResponseStatisticsList(statistics.supplementalResponseStatisticsList));
        generator.writeObjectField("addressResolutionStatistics", statistics.addressResolutionStatistics);
        generator.writeObjectField("regionsContacted", statistics.regionsContacted);
        generator.writeObjectField("retryContext", statistics.retryContext);
        generator.writeObjectField("metadataDiagnosticsContext", statistics.getMetadataDiagnosticsContext());
        generator.writeObjectField("serializationDiagnosticsContext", statistics.getSerializationDiagnosticsContext());
        generator.writeObjectField("gatewayStatistics", statistics.gatewayStatistics);
        try {
            SystemInformation systemInformation = fetchSystemInformation();
            generator.writeObjectField("systemInformation", systemInformation);
        } catch (Exception ignored) {
            // Best-effort: system information is auxiliary diagnostics data; a
            // failure to collect it must not fail serialization of the statistics.
        }
        generator.writeObjectField("clientCfgs", statistics.diagnosticsClientConfig);
        generator.writeEndObject();
    }
}
/**
 * Returns at most the last {@code MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING}
 * entries of the supplemental statistics. When no capping is needed the
 * original list is returned unchanged; otherwise a subList view of the tail
 * is returned (not a copy).
 */
public static List<StoreResponseStatistics> getCappedSupplementalResponseStatisticsList(List<StoreResponseStatistics> supplementalResponseStatisticsList) {
    int size = supplementalResponseStatisticsList.size();
    int start = Math.max(size - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0);
    return start == 0
        ? supplementalResponseStatisticsList
        : supplementalResponseStatisticsList.subList(start, size);
}
/**
 * Timing and outcome of one address-resolution call. Created by
 * recordAddressResolutionStart and completed by recordAddressResolutionEnd.
 * Field names are part of the serialized diagnostics JSON.
 */
public static class AddressResolutionStatistics {
    @JsonSerialize(using = DiagnosticsInstantSerializer.class)
    private Instant startTimeUTC;
    @JsonSerialize(using = DiagnosticsInstantSerializer.class)
    private Instant endTimeUTC;
    @JsonSerialize
    private String targetEndpoint;
    @JsonSerialize
    private String exceptionMessage;
    @JsonSerialize
    private boolean forceRefresh;
    @JsonSerialize
    private boolean forceCollectionRoutingMapRefresh;
    // True until recordAddressResolutionEnd flips it; marks in-flight resolutions.
    @JsonSerialize
    private boolean inflightRequest = true;

    public Instant getStartTimeUTC() {
        return startTimeUTC;
    }

    public Instant getEndTimeUTC() {
        return endTimeUTC;
    }

    public String getTargetEndpoint() {
        return targetEndpoint;
    }

    public String getExceptionMessage() {
        return exceptionMessage;
    }

    public boolean isInflightRequest() {
        return inflightRequest;
    }

    public boolean isForceRefresh() {
        return forceRefresh;
    }

    public boolean isForceCollectionRoutingMapRefresh() {
        return forceCollectionRoutingMapRefresh;
    }
}

/**
 * Details of the last gateway call recorded by recordGatewayResponse:
 * status/sub-status codes, request charge, timeline and failure details.
 */
public static class GatewayStatistics {
    private String sessionToken;
    private OperationType operationType;
    private ResourceType resourceType;
    private int statusCode;
    private int subStatusCode;
    private double requestCharge;
    private RequestTimeline requestTimeline;
    private String partitionKeyRangeId;
    private String exceptionMessage;
    private String exceptionResponseHeaders;

    public String getSessionToken() {
        return sessionToken;
    }

    public OperationType getOperationType() {
        return operationType;
    }

    public int getStatusCode() {
        return statusCode;
    }

    public int getSubStatusCode() {
        return subStatusCode;
    }

    public double getRequestCharge() {
        return requestCharge;
    }

    public RequestTimeline getRequestTimeline() {
        return requestTimeline;
    }

    public ResourceType getResourceType() {
        return resourceType;
    }

    public String getPartitionKeyRangeId() {
        return partitionKeyRangeId;
    }

    public String getExceptionMessage() {
        return exceptionMessage;
    }

    public String getExceptionResponseHeaders() {
        return exceptionResponseHeaders;
    }
}
/**
 * Captures a point-in-time JVM snapshot: used/available heap (in KB),
 * available processors and the current system CPU load.
 */
public static SystemInformation fetchSystemInformation() {
    Runtime runtime = Runtime.getRuntime();
    long totalKb = runtime.totalMemory() / 1024;
    long freeKb = runtime.freeMemory() / 1024;
    long maxKb = runtime.maxMemory() / 1024;
    long usedKb = totalKb - freeKb;

    SystemInformation info = new SystemInformation();
    info.usedMemory = usedKb + " KB";
    info.availableMemory = (maxKb - usedKb) + " KB";
    info.availableProcessors = runtime.availableProcessors();
    info.systemCpuLoad = CpuMemoryMonitor.getCpuLoad().toString();
    return info;
}
} |
We probably can get the sessionToken directly from the requestHeaders, then there is no need to recalculate the string format: ``` if (request.requestContext.sessionToken == null) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } else { request.getHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, concatPartitionKeyRangeIdWithSessionToken(partitionKeyRangeId, request.requestContext.sessionToken.convertToString())); } ``` | public void recordResponse(RxDocumentServiceRequest request, StoreResultDiagnostics storeResultDiagnostics, GlobalEndpointManager globalEndpointManager) {
Objects.requireNonNull(request, "request is required and cannot be null.");
Instant responseTime = Instant.now();
StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics();
storeResponseStatistics.requestResponseTimeUTC = responseTime;
storeResponseStatistics.storeResult = storeResultDiagnostics;
storeResponseStatistics.requestOperationType = request.getOperationType();
storeResponseStatistics.requestResourceType = request.getResourceType();
if (request.requestContext.sessionToken == null && request.getOriginalSessionToken() == null) {
storeResponseStatistics.requestSessionToken = null;
} else {
if (request.getOriginalSessionToken() != null) {
storeResponseStatistics.requestSessionToken = request.getOriginalSessionToken();
} else {
storeResponseStatistics.requestSessionToken = SessionTokenHelper.concatPartitionKeyRangeIdWithSessionToken(request.requestContext.resolvedPartitionKeyRange.getId(), request.requestContext.sessionToken.convertToString());
}
}
activityId = request.getActivityId().toString();
URI locationEndPoint = null;
if (request.requestContext != null) {
if (request.requestContext.locationEndpointToRoute != null) {
locationEndPoint = request.requestContext.locationEndpointToRoute;
}
}
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, request.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
if (storeResponseStatistics.requestOperationType == OperationType.Head
|| storeResponseStatistics.requestOperationType == OperationType.HeadFeed) {
this.supplementalResponseStatisticsList.add(storeResponseStatistics);
} else {
this.responseStatisticsList.add(storeResponseStatistics);
}
}
} | if (request.getOriginalSessionToken() != null) { | public void recordResponse(RxDocumentServiceRequest request, StoreResultDiagnostics storeResultDiagnostics, GlobalEndpointManager globalEndpointManager) {
Objects.requireNonNull(request, "request is required and cannot be null.");
Instant responseTime = Instant.now();
StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics();
storeResponseStatistics.requestResponseTimeUTC = responseTime;
storeResponseStatistics.storeResult = storeResultDiagnostics;
storeResponseStatistics.requestOperationType = request.getOperationType();
storeResponseStatistics.requestResourceType = request.getResourceType();
storeResponseStatistics.requestSessionToken = request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);
activityId = request.getActivityId().toString();
URI locationEndPoint = null;
if (request.requestContext != null) {
if (request.requestContext.locationEndpointToRoute != null) {
locationEndPoint = request.requestContext.locationEndpointToRoute;
}
}
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, request.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
if (storeResponseStatistics.requestOperationType == OperationType.Head
|| storeResponseStatistics.requestOperationType == OperationType.HeadFeed) {
this.supplementalResponseStatisticsList.add(storeResponseStatistics);
} else {
this.responseStatisticsList.add(storeResponseStatistics);
}
}
} | class ClientSideRequestStatistics {
// Cap on Head/HeadFeed statistics retained in the serialized diagnostics output.
private static final int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10;
private final DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig;
private String activityId;
private List<StoreResponseStatistics> responseStatisticsList;
// Head/HeadFeed responses are tracked separately.
private List<StoreResponseStatistics> supplementalResponseStatisticsList;
// Keyed by the identifier returned from recordAddressResolutionStart.
private Map<String, AddressResolutionStatistics> addressResolutionStatistics;
// Session token sent with the request; not set by this constructor.
private String requestSessionToken;
private List<URI> contactedReplicas;
private Set<URI> failedReplicas;
private Instant requestStartTimeUTC;
private Instant requestEndTimeUTC;
private Set<String> regionsContacted;
private Set<URI> locationEndpointsContacted;
private RetryContext retryContext;
private GatewayStatistics gatewayStatistics;
private MetadataDiagnosticsContext metadataDiagnosticsContext;
private SerializationDiagnosticsContext serializationDiagnosticsContext;

/**
 * Creates empty statistics. Start and end time are both set to "now", so
 * getDuration() is ~zero until an event is recorded.
 */
public ClientSideRequestStatistics(DiagnosticsClientContext diagnosticsClientContext) {
    this.diagnosticsClientConfig = diagnosticsClientContext.getConfig();
    this.requestStartTimeUTC = Instant.now();
    this.requestEndTimeUTC = Instant.now();
    this.responseStatisticsList = new ArrayList<>();
    this.supplementalResponseStatisticsList = new ArrayList<>();
    this.addressResolutionStatistics = new HashMap<>();
    this.contactedReplicas = Collections.synchronizedList(new ArrayList<>());
    this.failedReplicas = Collections.synchronizedSet(new HashSet<>());
    this.regionsContacted = Collections.synchronizedSet(new HashSet<>());
    this.locationEndpointsContacted = Collections.synchronizedSet(new HashSet<>());
    this.metadataDiagnosticsContext = new MetadataDiagnosticsContext();
    this.serializationDiagnosticsContext = new SerializationDiagnosticsContext();
    this.retryContext = new RetryContext();
}

/**
 * Copy constructor: snapshots the collections of {@code toBeCloned} into
 * fresh synchronized wrappers so the clone is independent of the original.
 */
public ClientSideRequestStatistics(ClientSideRequestStatistics toBeCloned) {
    this.diagnosticsClientConfig = toBeCloned.diagnosticsClientConfig;
    this.requestStartTimeUTC = toBeCloned.requestStartTimeUTC;
    this.requestEndTimeUTC = toBeCloned.requestEndTimeUTC;
    this.responseStatisticsList = new ArrayList<>(toBeCloned.responseStatisticsList);
    this.supplementalResponseStatisticsList = new ArrayList<>(toBeCloned.supplementalResponseStatisticsList);
    this.addressResolutionStatistics = new HashMap<>(toBeCloned.addressResolutionStatistics);
    this.contactedReplicas = Collections.synchronizedList(new ArrayList<>(toBeCloned.contactedReplicas));
    this.failedReplicas = Collections.synchronizedSet(new HashSet<>(toBeCloned.failedReplicas));
    this.regionsContacted = Collections.synchronizedSet(new HashSet<>(toBeCloned.regionsContacted));
    this.locationEndpointsContacted = Collections.synchronizedSet(
        new HashSet<>(toBeCloned.locationEndpointsContacted));
    this.metadataDiagnosticsContext = new MetadataDiagnosticsContext(toBeCloned.metadataDiagnosticsContext);
    this.serializationDiagnosticsContext =
        new SerializationDiagnosticsContext(toBeCloned.serializationDiagnosticsContext);
    this.retryContext = new RetryContext(toBeCloned.retryContext);
    this.requestSessionToken = toBeCloned.requestSessionToken;
}

/** @return elapsed time between request start and the latest recorded end time. */
public Duration getDuration() {
    return Duration.between(requestStartTimeUTC, requestEndTimeUTC);
}

public Instant getRequestStartTimeUTC() {
    return requestStartTimeUTC;
}

public DiagnosticsClientContext.DiagnosticsClientConfig getDiagnosticsClientConfig() {
    return diagnosticsClientConfig;
}
/**
 * Records the outcome of a gateway call: advances the request end time, notes
 * the contacted region/endpoint, and captures status, charge, timeline and
 * failure details. The previous gatewayStatistics object is replaced (last
 * call wins). Thread-safe via synchronization on this instance.
 */
public void recordGatewayResponse(
    RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponseDiagnostics storeResponseDiagnostics, GlobalEndpointManager globalEndpointManager) {
    Instant responseTime = Instant.now();
    synchronized (this) {
        // Track the latest completion time seen across all recorded events.
        if (responseTime.isAfter(this.requestEndTimeUTC)) {
            this.requestEndTimeUTC = responseTime;
        }
        URI locationEndPoint = null;
        if (rxDocumentServiceRequest != null && rxDocumentServiceRequest.requestContext != null) {
            locationEndPoint = rxDocumentServiceRequest.requestContext.locationEndpointToRoute;
        }
        this.recordRetryContextEndTime();
        if (locationEndPoint != null) {
            this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, rxDocumentServiceRequest.getOperationType()));
            this.locationEndpointsContacted.add(locationEndPoint);
        }
        this.gatewayStatistics = new GatewayStatistics();
        if (rxDocumentServiceRequest != null) {
            this.gatewayStatistics.operationType = rxDocumentServiceRequest.getOperationType();
            this.gatewayStatistics.resourceType = rxDocumentServiceRequest.getResourceType();
        }
        this.gatewayStatistics.statusCode = storeResponseDiagnostics.getStatusCode();
        this.gatewayStatistics.subStatusCode = storeResponseDiagnostics.getSubStatusCode();
        this.gatewayStatistics.sessionToken = storeResponseDiagnostics.getSessionTokenAsString();
        this.gatewayStatistics.requestCharge = storeResponseDiagnostics.getRequestCharge();
        this.gatewayStatistics.requestTimeline = storeResponseDiagnostics.getRequestTimeline();
        this.gatewayStatistics.partitionKeyRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
        this.gatewayStatistics.exceptionMessage = storeResponseDiagnostics.getExceptionMessage();
        this.gatewayStatistics.exceptionResponseHeaders = storeResponseDiagnostics.getExceptionResponseHeaders();
        this.activityId = storeResponseDiagnostics.getActivityId();
    }
}
/**
 * Registers the start of an address-resolution call and returns the unique
 * identifier that must later be passed to recordAddressResolutionEnd.
 */
public String recordAddressResolutionStart(
    URI targetEndpoint,
    boolean forceRefresh,
    boolean forceCollectionRoutingMapRefresh) {
    AddressResolutionStatistics stats = new AddressResolutionStatistics();
    stats.startTimeUTC = Instant.now();
    stats.endTimeUTC = null;
    stats.targetEndpoint = (targetEndpoint == null) ? "<NULL>" : targetEndpoint.toString();
    stats.forceRefresh = forceRefresh;
    stats.forceCollectionRoutingMapRefresh = forceCollectionRoutingMapRefresh;

    String identifier = Utils.randomUUID().toString();
    synchronized (this) {
        this.addressResolutionStatistics.put(identifier, stats);
    }
    return identifier;
}
/**
 * Completes the address-resolution entry created by {@code recordAddressResolutionStart}.
 * No-op when {@code identifier} is blank. Updates the overall request end time if this
 * completion is the latest event seen so far. Thread-safe via synchronization on this.
 *
 * @param identifier       the key returned by recordAddressResolutionStart
 * @param exceptionMessage failure message, or null on success
 * @throws IllegalArgumentException if the identifier was never registered
 */
public void recordAddressResolutionEnd(String identifier, String exceptionMessage) {
    if (StringUtils.isEmpty(identifier)) {
        return;
    }
    Instant responseTime = Instant.now();
    synchronized (this) {
        // Single map lookup instead of containsKey + get; values are never null
        // because only freshly created statistics objects are ever put.
        AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier);
        if (resolutionStatistics == null) {
            throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start "
                + "before calling end");
        }
        if (responseTime.isAfter(this.requestEndTimeUTC)) {
            this.requestEndTimeUTC = responseTime;
        }
        resolutionStatistics.endTimeUTC = responseTime;
        resolutionStatistics.exceptionMessage = exceptionMessage;
        resolutionStatistics.inflightRequest = false;
    }
}
// --- Accessors for per-request diagnostic state. The collection setters wrap
// --- their argument in a synchronized view so concurrent recorders stay safe
// --- (setLocationEndpointsContacted stores the argument as-is).
public List<URI> getContactedReplicas() {
return contactedReplicas;
}
public void setContactedReplicas(List<URI> contactedReplicas) {
this.contactedReplicas = Collections.synchronizedList(contactedReplicas);
}
public Set<URI> getFailedReplicas() {
return failedReplicas;
}
public void setFailedReplicas(Set<URI> failedReplicas) {
this.failedReplicas = Collections.synchronizedSet(failedReplicas);
}
public Set<String> getContactedRegionNames() {
return regionsContacted;
}
public void setRegionsContacted(Set<String> regionsContacted) {
this.regionsContacted = Collections.synchronizedSet(regionsContacted);
}
public Set<URI> getLocationEndpointsContacted() {
return locationEndpointsContacted;
}
// NOTE(review): unlike the other setters this one does not wrap the argument
// in a synchronized view — confirm callers pass a thread-safe set.
public void setLocationEndpointsContacted(Set<URI> locationEndpointsContacted) {
this.locationEndpointsContacted = locationEndpointsContacted;
}
public MetadataDiagnosticsContext getMetadataDiagnosticsContext(){
return this.metadataDiagnosticsContext;
}
public SerializationDiagnosticsContext getSerializationDiagnosticsContext() {
return this.serializationDiagnosticsContext;
}
// Marks the end of the current retry window on the shared retry context.
public void recordRetryContextEndTime() {
this.retryContext.updateEndTime();
}
public RetryContext getRetryContext() {
return retryContext;
}
public List<StoreResponseStatistics> getResponseStatisticsList() {
return responseStatisticsList;
}
public List<StoreResponseStatistics> getSupplementalResponseStatisticsList() {
return supplementalResponseStatisticsList;
}
public Map<String, AddressResolutionStatistics> getAddressResolutionStatistics() {
return addressResolutionStatistics;
}
public GatewayStatistics getGatewayStatistics() {
return gatewayStatistics;
}
// NOTE(review): no requestSessionToken field is visible on this (outer) class
// in this chunk — verify this getter compiles against the full file.
public String getRequestSessionToken() { return requestSessionToken; }
// Snapshot of a single store response, serialized into the diagnostics JSON.
// Fields are written once by the recording methods and read via the getters.
public static class StoreResponseStatistics {
@JsonSerialize(using = StoreResultDiagnostics.StoreResultDiagnosticsSerializer.class)
private StoreResultDiagnostics storeResult;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant requestResponseTimeUTC;
@JsonSerialize
private ResourceType requestResourceType;
@JsonSerialize
private OperationType requestOperationType;
@JsonSerialize
private String requestSessionToken;
public StoreResultDiagnostics getStoreResult() {
return storeResult;
}
public Instant getRequestResponseTimeUTC() {
return requestResponseTimeUTC;
}
public ResourceType getRequestResourceType() {
return requestResourceType;
}
public OperationType getRequestOperationType() {
return requestOperationType;
}
public String getRequestSessionToken() { return requestSessionToken; }
}
// JVM memory/CPU snapshot attached to serialized diagnostics; populated by
// fetchSystemInformation() (memory figures are "<n> KB" strings).
public static class SystemInformation {
private String usedMemory;
private String availableMemory;
private String systemCpuLoad;
private int availableProcessors;
public String getUsedMemory() {
return usedMemory;
}
public String getAvailableMemory() {
return availableMemory;
}
public String getSystemCpuLoad() {
return systemCpuLoad;
}
public int getAvailableProcessors() {
return availableProcessors;
}
}
// Jackson serializer that flattens ClientSideRequestStatistics into the
// single-level JSON object emitted in Cosmos diagnostics strings.
public static class ClientSideRequestStatisticsSerializer extends StdSerializer<ClientSideRequestStatistics> {
private static final long serialVersionUID = -2746532297176812860L;
ClientSideRequestStatisticsSerializer() {
super(ClientSideRequestStatistics.class);
}
// Writes each statistics section as a named JSON field; supplemental
// statistics are capped before serialization to bound payload size.
@Override
public void serialize(
ClientSideRequestStatistics statistics, JsonGenerator generator, SerializerProvider provider) throws
IOException {
generator.writeStartObject();
long requestLatency = statistics
.getDuration()
.toMillis();
generator.writeStringField("userAgent", Utils.getUserAgent());
generator.writeStringField("activityId", statistics.activityId);
generator.writeNumberField("requestLatencyInMs", requestLatency);
generator.writeStringField("requestStartTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestStartTimeUTC));
generator.writeStringField("requestEndTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestEndTimeUTC))
generator.writeObjectField("responseStatisticsList", statistics.responseStatisticsList);
generator.writeObjectField("supplementalResponseStatisticsList", getCappedSupplementalResponseStatisticsList(statistics.supplementalResponseStatisticsList));
generator.writeObjectField("addressResolutionStatistics", statistics.addressResolutionStatistics);
generator.writeObjectField("regionsContacted", statistics.regionsContacted);
generator.writeObjectField("retryContext", statistics.retryContext);
generator.writeObjectField("metadataDiagnosticsContext", statistics.getMetadataDiagnosticsContext());
generator.writeObjectField("serializationDiagnosticsContext", statistics.getSerializationDiagnosticsContext());
generator.writeObjectField("gatewayStatistics", statistics.gatewayStatistics);
try {
SystemInformation systemInformation = fetchSystemInformation();
generator.writeObjectField("systemInformation", systemInformation);
} catch (Exception e) {
// Best-effort: system information is optional in diagnostics, so failures
// are deliberately swallowed. NOTE(review): consider debug-logging e.
}
generator.writeObjectField("clientCfgs", statistics.diagnosticsClientConfig);
generator.writeEndObject();
}
}
/**
 * Caps the supplemental statistics to the most recent
 * {@code MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING} entries.
 * <p>
 * Returns the input list unchanged when it is already within the cap;
 * otherwise returns a {@code subList} view (not a copy) of the trailing
 * entries, so later mutation of the source list is visible through it.
 *
 * @param supplementalResponseStatisticsList list to cap; must not be null
 * @return the original list or a trailing-window view of it
 */
public static List<StoreResponseStatistics> getCappedSupplementalResponseStatisticsList(List<StoreResponseStatistics> supplementalResponseStatisticsList) {
    int size = supplementalResponseStatisticsList.size();
    int from = Math.max(size - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0);
    return from == 0
        ? supplementalResponseStatisticsList
        : supplementalResponseStatisticsList.subList(from, size);
}
// Per-call address-resolution record; created by recordAddressResolutionStart
// (inflightRequest starts true) and completed by recordAddressResolutionEnd.
public static class AddressResolutionStatistics {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant endTimeUTC;
@JsonSerialize
private String targetEndpoint;
@JsonSerialize
private String exceptionMessage;
@JsonSerialize
private boolean forceRefresh;
@JsonSerialize
private boolean forceCollectionRoutingMapRefresh;
// True until the matching recordAddressResolutionEnd call flips it.
@JsonSerialize
private boolean inflightRequest = true;
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
public String getTargetEndpoint() {
return targetEndpoint;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public boolean isInflightRequest() {
return inflightRequest;
}
public boolean isForceRefresh() {
return forceRefresh;
}
public boolean isForceCollectionRoutingMapRefresh() {
return forceCollectionRoutingMapRefresh;
}
}
// Metadata captured from a gateway response by recordGatewayResponse; one
// snapshot per request (the field is overwritten on each gateway response).
public static class GatewayStatistics {
private String sessionToken;
private OperationType operationType;
private ResourceType resourceType;
private int statusCode;
private int subStatusCode;
private double requestCharge;
private RequestTimeline requestTimeline;
private String partitionKeyRangeId;
private String exceptionMessage;
private String exceptionResponseHeaders;
public String getSessionToken() {
return sessionToken;
}
public OperationType getOperationType() {
return operationType;
}
public int getStatusCode() {
return statusCode;
}
public int getSubStatusCode() {
return subStatusCode;
}
public double getRequestCharge() {
return requestCharge;
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
public ResourceType getResourceType() {
return resourceType;
}
public String getPartitionKeyRangeId() {
return partitionKeyRangeId;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public String getExceptionResponseHeaders() {
return exceptionResponseHeaders;
}
}
/**
 * Snapshots JVM memory usage (reported in KB), processor count and the CPU
 * load string supplied by {@code CpuMemoryMonitor}.
 *
 * @return a freshly populated {@link SystemInformation}
 */
public static SystemInformation fetchSystemInformation() {
    Runtime rt = Runtime.getRuntime();
    long totalKb = rt.totalMemory() / 1024;
    long freeKb = rt.freeMemory() / 1024;
    long maxKb = rt.maxMemory() / 1024;
    long usedKb = totalKb - freeKb;

    SystemInformation info = new SystemInformation();
    info.usedMemory = usedKb + " KB";
    // "Available" means headroom up to the JVM max heap, not just currently-free heap.
    info.availableMemory = (maxKb - usedKb) + " KB";
    info.availableProcessors = rt.availableProcessors();
    info.systemCpuLoad = CpuMemoryMonitor.getCpuLoad().toString();
    return info;
}
} | class ClientSideRequestStatistics {
// Cap applied to supplementalResponseStatisticsList when serializing/printing.
private static final int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10;
private final DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig;
private String activityId;
// Direct-mode responses; Head/HeadFeed responses go to the supplemental list.
private List<StoreResponseStatistics> responseStatisticsList;
private List<StoreResponseStatistics> supplementalResponseStatisticsList;
private Map<String, AddressResolutionStatistics> addressResolutionStatistics;
private List<URI> contactedReplicas;
private Set<URI> failedReplicas;
private Instant requestStartTimeUTC;
// Advanced by every record* method as later responses arrive.
private Instant requestEndTimeUTC;
private Set<String> regionsContacted;
private Set<URI> locationEndpointsContacted;
private RetryContext retryContext;
private GatewayStatistics gatewayStatistics;
private MetadataDiagnosticsContext metadataDiagnosticsContext;
private SerializationDiagnosticsContext serializationDiagnosticsContext;
// Initializes empty, thread-safe diagnostic state; start and end times both
// begin at "now" so getDuration() is zero until a response is recorded.
public ClientSideRequestStatistics(DiagnosticsClientContext diagnosticsClientContext) {
this.diagnosticsClientConfig = diagnosticsClientContext.getConfig();
this.requestStartTimeUTC = Instant.now();
this.requestEndTimeUTC = Instant.now();
this.responseStatisticsList = new ArrayList<>();
this.supplementalResponseStatisticsList = new ArrayList<>();
this.addressResolutionStatistics = new HashMap<>();
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>());
this.failedReplicas = Collections.synchronizedSet(new HashSet<>());
this.regionsContacted = Collections.synchronizedSet(new HashSet<>());
this.locationEndpointsContacted = Collections.synchronizedSet(new HashSet<>());
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext();
this.serializationDiagnosticsContext = new SerializationDiagnosticsContext();
this.retryContext = new RetryContext();
}
// Copy constructor: shallow-copies the collections into fresh synchronized
// containers (the contained statistics objects themselves are shared).
public ClientSideRequestStatistics(ClientSideRequestStatistics toBeCloned) {
this.diagnosticsClientConfig = toBeCloned.diagnosticsClientConfig;
this.requestStartTimeUTC = toBeCloned.requestStartTimeUTC;
this.requestEndTimeUTC = toBeCloned.requestEndTimeUTC;
this.responseStatisticsList = new ArrayList<>(toBeCloned.responseStatisticsList);
this.supplementalResponseStatisticsList = new ArrayList<>(toBeCloned.supplementalResponseStatisticsList);
this.addressResolutionStatistics = new HashMap<>(toBeCloned.addressResolutionStatistics);
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>(toBeCloned.contactedReplicas));
this.failedReplicas = Collections.synchronizedSet(new HashSet<>(toBeCloned.failedReplicas));
this.regionsContacted = Collections.synchronizedSet(new HashSet<>(toBeCloned.regionsContacted));
this.locationEndpointsContacted = Collections.synchronizedSet(
new HashSet<>(toBeCloned.locationEndpointsContacted));
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext(toBeCloned.metadataDiagnosticsContext);
this.serializationDiagnosticsContext =
new SerializationDiagnosticsContext(toBeCloned.serializationDiagnosticsContext);
this.retryContext = new RetryContext(toBeCloned.retryContext);
}
// Elapsed time between request start and the latest recorded response.
public Duration getDuration() {
return Duration.between(requestStartTimeUTC, requestEndTimeUTC);
}
public Instant getRequestStartTimeUTC() {
return requestStartTimeUTC;
}
public DiagnosticsClientContext.DiagnosticsClientConfig getDiagnosticsClientConfig() {
return diagnosticsClientConfig;
}
/**
 * Records the outcome of one gateway call: advances the request end time,
 * notes the contacted region/endpoint, closes the retry-context window and
 * captures the response metadata in a fresh {@code GatewayStatistics}
 * snapshot. All updates happen under the shared lock.
 *
 * @param rxDocumentServiceRequest request that was sent; may be null
 * @param storeResponseDiagnostics response/exception metadata; must not be null
 * @param globalEndpointManager used to map the routed endpoint to a region name
 */
public void recordGatewayResponse(
    RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponseDiagnostics storeResponseDiagnostics, GlobalEndpointManager globalEndpointManager) {
    Instant responseTime = Instant.now();
    synchronized (this) {
        if (responseTime.isAfter(this.requestEndTimeUTC)) {
            this.requestEndTimeUTC = responseTime;
        }

        // Endpoint the request was actually routed to, when the context carries one.
        URI routedEndpoint = null;
        if (rxDocumentServiceRequest != null && rxDocumentServiceRequest.requestContext != null) {
            routedEndpoint = rxDocumentServiceRequest.requestContext.locationEndpointToRoute;
        }
        this.recordRetryContextEndTime();

        if (routedEndpoint != null) {
            this.regionsContacted.add(
                globalEndpointManager.getRegionName(routedEndpoint, rxDocumentServiceRequest.getOperationType()));
            this.locationEndpointsContacted.add(routedEndpoint);
        }

        // Publish the snapshot first (as the original code does), then fill it
        // through a local alias to avoid repeating the field access.
        GatewayStatistics snapshot = new GatewayStatistics();
        this.gatewayStatistics = snapshot;
        if (rxDocumentServiceRequest != null) {
            snapshot.operationType = rxDocumentServiceRequest.getOperationType();
            snapshot.resourceType = rxDocumentServiceRequest.getResourceType();
        }
        snapshot.statusCode = storeResponseDiagnostics.getStatusCode();
        snapshot.subStatusCode = storeResponseDiagnostics.getSubStatusCode();
        snapshot.sessionToken = storeResponseDiagnostics.getSessionTokenAsString();
        snapshot.requestCharge = storeResponseDiagnostics.getRequestCharge();
        snapshot.requestTimeline = storeResponseDiagnostics.getRequestTimeline();
        snapshot.partitionKeyRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
        snapshot.exceptionMessage = storeResponseDiagnostics.getExceptionMessage();
        snapshot.exceptionResponseHeaders = storeResponseDiagnostics.getExceptionResponseHeaders();
        this.activityId = storeResponseDiagnostics.getActivityId();
    }
}
// Begins tracking one address-resolution call; returns the identifier that
// must be passed to recordAddressResolutionEnd.
public String recordAddressResolutionStart(
URI targetEndpoint,
boolean forceRefresh,
boolean forceCollectionRoutingMapRefresh) {
String identifier = Utils
.randomUUID()
.toString();
AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics();
resolutionStatistics.startTimeUTC = Instant.now();
resolutionStatistics.endTimeUTC = null;
// "<NULL>" keeps serialized diagnostics readable when no endpoint was given.
resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString();
resolutionStatistics.forceRefresh = forceRefresh;
resolutionStatistics.forceCollectionRoutingMapRefresh = forceCollectionRoutingMapRefresh;
synchronized (this) {
this.addressResolutionStatistics.put(identifier, resolutionStatistics);
}
return identifier;
}
// Completes the entry created by recordAddressResolutionStart; empty
// identifiers are ignored, unknown identifiers throw IllegalArgumentException.
public void recordAddressResolutionEnd(String identifier, String exceptionMessage) {
if (StringUtils.isEmpty(identifier)) {
return;
}
Instant responseTime = Instant.now();
synchronized (this) {
if (!this.addressResolutionStatistics.containsKey(identifier)) {
throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start "
+ "before calling end");
}
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier);
resolutionStatistics.endTimeUTC = responseTime;
resolutionStatistics.exceptionMessage = exceptionMessage;
resolutionStatistics.inflightRequest = false;
}
}
// --- Accessors; collection setters wrap arguments in synchronized views
// --- (setLocationEndpointsContacted stores the argument as-is).
public List<URI> getContactedReplicas() {
return contactedReplicas;
}
public void setContactedReplicas(List<URI> contactedReplicas) {
this.contactedReplicas = Collections.synchronizedList(contactedReplicas);
}
public Set<URI> getFailedReplicas() {
return failedReplicas;
}
public void setFailedReplicas(Set<URI> failedReplicas) {
this.failedReplicas = Collections.synchronizedSet(failedReplicas);
}
public Set<String> getContactedRegionNames() {
return regionsContacted;
}
public void setRegionsContacted(Set<String> regionsContacted) {
this.regionsContacted = Collections.synchronizedSet(regionsContacted);
}
public Set<URI> getLocationEndpointsContacted() {
return locationEndpointsContacted;
}
// NOTE(review): not wrapped in a synchronized view, unlike the other setters.
public void setLocationEndpointsContacted(Set<URI> locationEndpointsContacted) {
this.locationEndpointsContacted = locationEndpointsContacted;
}
public MetadataDiagnosticsContext getMetadataDiagnosticsContext(){
return this.metadataDiagnosticsContext;
}
public SerializationDiagnosticsContext getSerializationDiagnosticsContext() {
return this.serializationDiagnosticsContext;
}
// Marks the end of the current retry window on the shared retry context.
public void recordRetryContextEndTime() {
this.retryContext.updateEndTime();
}
public RetryContext getRetryContext() {
return retryContext;
}
public List<StoreResponseStatistics> getResponseStatisticsList() {
return responseStatisticsList;
}
public List<StoreResponseStatistics> getSupplementalResponseStatisticsList() {
return supplementalResponseStatisticsList;
}
public Map<String, AddressResolutionStatistics> getAddressResolutionStatistics() {
return addressResolutionStatistics;
}
public GatewayStatistics getGatewayStatistics() {
return gatewayStatistics;
}
// Snapshot of a single store response, serialized into the diagnostics JSON.
public static class StoreResponseStatistics {
@JsonSerialize(using = StoreResultDiagnostics.StoreResultDiagnosticsSerializer.class)
private StoreResultDiagnostics storeResult;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant requestResponseTimeUTC;
@JsonSerialize
private ResourceType requestResourceType;
@JsonSerialize
private OperationType requestOperationType;
@JsonSerialize
private String requestSessionToken;
public StoreResultDiagnostics getStoreResult() {
return storeResult;
}
public Instant getRequestResponseTimeUTC() {
return requestResponseTimeUTC;
}
public ResourceType getRequestResourceType() {
return requestResourceType;
}
public OperationType getRequestOperationType() {
return requestOperationType;
}
public String getRequestSessionToken() { return requestSessionToken; }
}
// JVM memory/CPU snapshot attached to serialized diagnostics; populated by
// fetchSystemInformation() (memory figures are "<n> KB" strings).
public static class SystemInformation {
private String usedMemory;
private String availableMemory;
private String systemCpuLoad;
private int availableProcessors;
public String getUsedMemory() {
return usedMemory;
}
public String getAvailableMemory() {
return availableMemory;
}
public String getSystemCpuLoad() {
return systemCpuLoad;
}
public int getAvailableProcessors() {
return availableProcessors;
}
}
// Jackson serializer that flattens ClientSideRequestStatistics into the
// single-level JSON object emitted in Cosmos diagnostics strings.
public static class ClientSideRequestStatisticsSerializer extends StdSerializer<ClientSideRequestStatistics> {
private static final long serialVersionUID = -2746532297176812860L;
ClientSideRequestStatisticsSerializer() {
super(ClientSideRequestStatistics.class);
}
// Writes each statistics section as a named JSON field; supplemental
// statistics are capped before serialization to bound payload size.
@Override
public void serialize(
ClientSideRequestStatistics statistics, JsonGenerator generator, SerializerProvider provider) throws
IOException {
generator.writeStartObject();
long requestLatency = statistics
.getDuration()
.toMillis();
generator.writeStringField("userAgent", Utils.getUserAgent());
generator.writeStringField("activityId", statistics.activityId);
generator.writeNumberField("requestLatencyInMs", requestLatency);
generator.writeStringField("requestStartTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestStartTimeUTC));
generator.writeStringField("requestEndTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestEndTimeUTC));
generator.writeObjectField("responseStatisticsList", statistics.responseStatisticsList);
generator.writeObjectField("supplementalResponseStatisticsList", getCappedSupplementalResponseStatisticsList(statistics.supplementalResponseStatisticsList));
generator.writeObjectField("addressResolutionStatistics", statistics.addressResolutionStatistics);
generator.writeObjectField("regionsContacted", statistics.regionsContacted);
generator.writeObjectField("retryContext", statistics.retryContext);
generator.writeObjectField("metadataDiagnosticsContext", statistics.getMetadataDiagnosticsContext());
generator.writeObjectField("serializationDiagnosticsContext", statistics.getSerializationDiagnosticsContext());
generator.writeObjectField("gatewayStatistics", statistics.gatewayStatistics);
try {
SystemInformation systemInformation = fetchSystemInformation();
generator.writeObjectField("systemInformation", systemInformation);
} catch (Exception e) {
// Best-effort: system information is optional in diagnostics, so failures
// are deliberately swallowed. NOTE(review): consider debug-logging e.
}
generator.writeObjectField("clientCfgs", statistics.diagnosticsClientConfig);
generator.writeEndObject();
}
}
// Caps the supplemental statistics to the most recent
// MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING entries. Returns the input list
// itself when within the cap, otherwise a subList VIEW (not a copy).
public static List<StoreResponseStatistics> getCappedSupplementalResponseStatisticsList(List<StoreResponseStatistics> supplementalResponseStatisticsList) {
int supplementalResponseStatisticsListCount = supplementalResponseStatisticsList.size();
int initialIndex =
Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0);
if (initialIndex != 0) {
List<StoreResponseStatistics> subList = supplementalResponseStatisticsList
.subList(initialIndex,
supplementalResponseStatisticsListCount);
return subList;
}
return supplementalResponseStatisticsList;
}
// Per-call address-resolution record; created by recordAddressResolutionStart
// (inflightRequest starts true) and completed by recordAddressResolutionEnd.
public static class AddressResolutionStatistics {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant endTimeUTC;
@JsonSerialize
private String targetEndpoint;
@JsonSerialize
private String exceptionMessage;
@JsonSerialize
private boolean forceRefresh;
@JsonSerialize
private boolean forceCollectionRoutingMapRefresh;
// True until the matching recordAddressResolutionEnd call flips it.
@JsonSerialize
private boolean inflightRequest = true;
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
public String getTargetEndpoint() {
return targetEndpoint;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public boolean isInflightRequest() {
return inflightRequest;
}
public boolean isForceRefresh() {
return forceRefresh;
}
public boolean isForceCollectionRoutingMapRefresh() {
return forceCollectionRoutingMapRefresh;
}
}
// Metadata captured from a gateway response by recordGatewayResponse.
public static class GatewayStatistics {
private String sessionToken;
private OperationType operationType;
private ResourceType resourceType;
private int statusCode;
private int subStatusCode;
private double requestCharge;
private RequestTimeline requestTimeline;
private String partitionKeyRangeId;
private String exceptionMessage;
private String exceptionResponseHeaders;
public String getSessionToken() {
return sessionToken;
}
public OperationType getOperationType() {
return operationType;
}
public int getStatusCode() {
return statusCode;
}
public int getSubStatusCode() {
return subStatusCode;
}
public double getRequestCharge() {
return requestCharge;
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
public ResourceType getResourceType() {
return resourceType;
}
public String getPartitionKeyRangeId() {
return partitionKeyRangeId;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public String getExceptionResponseHeaders() {
return exceptionResponseHeaders;
}
}
// Snapshots JVM memory usage (KB), processor count and the CPU load reported
// by CpuMemoryMonitor; "available" means headroom up to the JVM max heap.
public static SystemInformation fetchSystemInformation() {
SystemInformation systemInformation = new SystemInformation();
Runtime runtime = Runtime.getRuntime();
long totalMemory = runtime.totalMemory() / 1024;
long freeMemory = runtime.freeMemory() / 1024;
long maxMemory = runtime.maxMemory() / 1024;
systemInformation.usedMemory = totalMemory - freeMemory + " KB";
systemInformation.availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";
systemInformation.availableProcessors = runtime.availableProcessors();
systemInformation.systemCpuLoad = CpuMemoryMonitor
.getCpuLoad()
.toString();
return systemInformation;
}
} |
request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN) itself should be enough | public void recordResponse(RxDocumentServiceRequest request, StoreResultDiagnostics storeResultDiagnostics, GlobalEndpointManager globalEndpointManager) {
Objects.requireNonNull(request, "request is required and cannot be null.");
Instant responseTime = Instant.now();
StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics();
storeResponseStatistics.requestResponseTimeUTC = responseTime;
storeResponseStatistics.storeResult = storeResultDiagnostics;
storeResponseStatistics.requestOperationType = request.getOperationType();
storeResponseStatistics.requestResourceType = request.getResourceType();
if (request.requestContext.sessionToken != null) {
storeResponseStatistics.requestSessionToken = request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);
} else {
storeResponseStatistics.requestSessionToken = request.getOriginalSessionToken();
}
activityId = request.getActivityId().toString();
URI locationEndPoint = null;
if (request.requestContext != null) {
if (request.requestContext.locationEndpointToRoute != null) {
locationEndPoint = request.requestContext.locationEndpointToRoute;
}
}
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, request.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
if (storeResponseStatistics.requestOperationType == OperationType.Head
|| storeResponseStatistics.requestOperationType == OperationType.HeadFeed) {
this.supplementalResponseStatisticsList.add(storeResponseStatistics);
} else {
this.responseStatisticsList.add(storeResponseStatistics);
}
}
} | storeResponseStatistics.requestSessionToken = request.getOriginalSessionToken(); | public void recordResponse(RxDocumentServiceRequest request, StoreResultDiagnostics storeResultDiagnostics, GlobalEndpointManager globalEndpointManager) {
Objects.requireNonNull(request, "request is required and cannot be null.");
Instant responseTime = Instant.now();
StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics();
storeResponseStatistics.requestResponseTimeUTC = responseTime;
storeResponseStatistics.storeResult = storeResultDiagnostics;
storeResponseStatistics.requestOperationType = request.getOperationType();
storeResponseStatistics.requestResourceType = request.getResourceType();
storeResponseStatistics.requestSessionToken = request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);
activityId = request.getActivityId().toString();
URI locationEndPoint = null;
if (request.requestContext != null) {
if (request.requestContext.locationEndpointToRoute != null) {
locationEndPoint = request.requestContext.locationEndpointToRoute;
}
}
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, request.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
if (storeResponseStatistics.requestOperationType == OperationType.Head
|| storeResponseStatistics.requestOperationType == OperationType.HeadFeed) {
this.supplementalResponseStatisticsList.add(storeResponseStatistics);
} else {
this.responseStatisticsList.add(storeResponseStatistics);
}
}
} | class ClientSideRequestStatistics {
// Cap applied to supplementalResponseStatisticsList when serializing/printing.
private static final int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10;
private final DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig;
private String activityId;
// Direct-mode responses; Head/HeadFeed responses go to the supplemental list.
private List<StoreResponseStatistics> responseStatisticsList;
private List<StoreResponseStatistics> supplementalResponseStatisticsList;
private Map<String, AddressResolutionStatistics> addressResolutionStatistics;
private List<URI> contactedReplicas;
private Set<URI> failedReplicas;
private Instant requestStartTimeUTC;
// Advanced by every record* method as later responses arrive.
private Instant requestEndTimeUTC;
private Set<String> regionsContacted;
private Set<URI> locationEndpointsContacted;
private RetryContext retryContext;
private GatewayStatistics gatewayStatistics;
private MetadataDiagnosticsContext metadataDiagnosticsContext;
private SerializationDiagnosticsContext serializationDiagnosticsContext;
// Initializes empty, thread-safe diagnostic state; start and end times both
// begin at "now" so getDuration() is zero until a response is recorded.
public ClientSideRequestStatistics(DiagnosticsClientContext diagnosticsClientContext) {
this.diagnosticsClientConfig = diagnosticsClientContext.getConfig();
this.requestStartTimeUTC = Instant.now();
this.requestEndTimeUTC = Instant.now();
this.responseStatisticsList = new ArrayList<>();
this.supplementalResponseStatisticsList = new ArrayList<>();
this.addressResolutionStatistics = new HashMap<>();
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>());
this.failedReplicas = Collections.synchronizedSet(new HashSet<>());
this.regionsContacted = Collections.synchronizedSet(new HashSet<>());
this.locationEndpointsContacted = Collections.synchronizedSet(new HashSet<>());
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext();
this.serializationDiagnosticsContext = new SerializationDiagnosticsContext();
this.retryContext = new RetryContext();
}
// Copy constructor: shallow-copies the collections into fresh synchronized
// containers (the contained statistics objects themselves are shared).
public ClientSideRequestStatistics(ClientSideRequestStatistics toBeCloned) {
this.diagnosticsClientConfig = toBeCloned.diagnosticsClientConfig;
this.requestStartTimeUTC = toBeCloned.requestStartTimeUTC;
this.requestEndTimeUTC = toBeCloned.requestEndTimeUTC;
this.responseStatisticsList = new ArrayList<>(toBeCloned.responseStatisticsList);
this.supplementalResponseStatisticsList = new ArrayList<>(toBeCloned.supplementalResponseStatisticsList);
this.addressResolutionStatistics = new HashMap<>(toBeCloned.addressResolutionStatistics);
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>(toBeCloned.contactedReplicas));
this.failedReplicas = Collections.synchronizedSet(new HashSet<>(toBeCloned.failedReplicas));
this.regionsContacted = Collections.synchronizedSet(new HashSet<>(toBeCloned.regionsContacted));
this.locationEndpointsContacted = Collections.synchronizedSet(
new HashSet<>(toBeCloned.locationEndpointsContacted));
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext(toBeCloned.metadataDiagnosticsContext);
this.serializationDiagnosticsContext =
new SerializationDiagnosticsContext(toBeCloned.serializationDiagnosticsContext);
this.retryContext = new RetryContext(toBeCloned.retryContext);
}
// Elapsed time between request start and the latest recorded response.
public Duration getDuration() {
return Duration.between(requestStartTimeUTC, requestEndTimeUTC);
}
public Instant getRequestStartTimeUTC() {
return requestStartTimeUTC;
}
public DiagnosticsClientContext.DiagnosticsClientConfig getDiagnosticsClientConfig() {
return diagnosticsClientConfig;
}
// Records the outcome of one gateway call under the shared lock: advances the
// request end time, notes the contacted region/endpoint, closes the
// retry-context window and captures response metadata in a new snapshot.
public void recordGatewayResponse(
RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponseDiagnostics storeResponseDiagnostics, GlobalEndpointManager globalEndpointManager) {
Instant responseTime = Instant.now();
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
URI locationEndPoint = null;
if (rxDocumentServiceRequest != null && rxDocumentServiceRequest.requestContext != null) {
locationEndPoint = rxDocumentServiceRequest.requestContext.locationEndpointToRoute;
}
this.recordRetryContextEndTime();
// locationEndPoint != null implies rxDocumentServiceRequest != null here.
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, rxDocumentServiceRequest.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
this.gatewayStatistics = new GatewayStatistics();
if (rxDocumentServiceRequest != null) {
this.gatewayStatistics.operationType = rxDocumentServiceRequest.getOperationType();
this.gatewayStatistics.resourceType = rxDocumentServiceRequest.getResourceType();
}
this.gatewayStatistics.statusCode = storeResponseDiagnostics.getStatusCode();
this.gatewayStatistics.subStatusCode = storeResponseDiagnostics.getSubStatusCode();
this.gatewayStatistics.sessionToken = storeResponseDiagnostics.getSessionTokenAsString();
this.gatewayStatistics.requestCharge = storeResponseDiagnostics.getRequestCharge();
this.gatewayStatistics.requestTimeline = storeResponseDiagnostics.getRequestTimeline();
this.gatewayStatistics.partitionKeyRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
this.gatewayStatistics.exceptionMessage = storeResponseDiagnostics.getExceptionMessage();
this.gatewayStatistics.exceptionResponseHeaders = storeResponseDiagnostics.getExceptionResponseHeaders();
this.activityId = storeResponseDiagnostics.getActivityId();
}
}
// Begins tracking one address-resolution call; returns the identifier that
// must be passed to recordAddressResolutionEnd.
public String recordAddressResolutionStart(
URI targetEndpoint,
boolean forceRefresh,
boolean forceCollectionRoutingMapRefresh) {
String identifier = Utils
.randomUUID()
.toString();
AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics();
resolutionStatistics.startTimeUTC = Instant.now();
resolutionStatistics.endTimeUTC = null;
// "<NULL>" keeps serialized diagnostics readable when no endpoint was given.
resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString();
resolutionStatistics.forceRefresh = forceRefresh;
resolutionStatistics.forceCollectionRoutingMapRefresh = forceCollectionRoutingMapRefresh;
synchronized (this) {
this.addressResolutionStatistics.put(identifier, resolutionStatistics);
}
return identifier;
}
// Completes the entry created by recordAddressResolutionStart; empty
// identifiers are ignored, unknown identifiers throw IllegalArgumentException.
public void recordAddressResolutionEnd(String identifier, String exceptionMessage) {
if (StringUtils.isEmpty(identifier)) {
return;
}
Instant responseTime = Instant.now();
synchronized (this) {
if (!this.addressResolutionStatistics.containsKey(identifier)) {
throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start "
+ "before calling end");
}
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier);
resolutionStatistics.endTimeUTC = responseTime;
resolutionStatistics.exceptionMessage = exceptionMessage;
resolutionStatistics.inflightRequest = false;
}
}
public List<URI> getContactedReplicas() {
return contactedReplicas;
}
public void setContactedReplicas(List<URI> contactedReplicas) {
this.contactedReplicas = Collections.synchronizedList(contactedReplicas);
}
public Set<URI> getFailedReplicas() {
return failedReplicas;
}
public void setFailedReplicas(Set<URI> failedReplicas) {
this.failedReplicas = Collections.synchronizedSet(failedReplicas);
}
public Set<String> getContactedRegionNames() {
return regionsContacted;
}
public void setRegionsContacted(Set<String> regionsContacted) {
this.regionsContacted = Collections.synchronizedSet(regionsContacted);
}
public Set<URI> getLocationEndpointsContacted() {
return locationEndpointsContacted;
}
public void setLocationEndpointsContacted(Set<URI> locationEndpointsContacted) {
this.locationEndpointsContacted = locationEndpointsContacted;
}
public MetadataDiagnosticsContext getMetadataDiagnosticsContext(){
return this.metadataDiagnosticsContext;
}
public SerializationDiagnosticsContext getSerializationDiagnosticsContext() {
return this.serializationDiagnosticsContext;
}
public void recordRetryContextEndTime() {
this.retryContext.updateEndTime();
}
public RetryContext getRetryContext() {
return retryContext;
}
public List<StoreResponseStatistics> getResponseStatisticsList() {
return responseStatisticsList;
}
public List<StoreResponseStatistics> getSupplementalResponseStatisticsList() {
return supplementalResponseStatisticsList;
}
public Map<String, AddressResolutionStatistics> getAddressResolutionStatistics() {
return addressResolutionStatistics;
}
public GatewayStatistics getGatewayStatistics() {
return gatewayStatistics;
}
public static class StoreResponseStatistics {
@JsonSerialize(using = StoreResultDiagnostics.StoreResultDiagnosticsSerializer.class)
private StoreResultDiagnostics storeResult;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant requestResponseTimeUTC;
@JsonSerialize
private ResourceType requestResourceType;
@JsonSerialize
private OperationType requestOperationType;
@JsonSerialize
private String requestSessionToken;
public StoreResultDiagnostics getStoreResult() {
return storeResult;
}
public Instant getRequestResponseTimeUTC() {
return requestResponseTimeUTC;
}
public ResourceType getRequestResourceType() {
return requestResourceType;
}
public OperationType getRequestOperationType() {
return requestOperationType;
}
public String getRequestSessionToken() { return requestSessionToken; }
}
public static class SystemInformation {
private String usedMemory;
private String availableMemory;
private String systemCpuLoad;
private int availableProcessors;
public String getUsedMemory() {
return usedMemory;
}
public String getAvailableMemory() {
return availableMemory;
}
public String getSystemCpuLoad() {
return systemCpuLoad;
}
public int getAvailableProcessors() {
return availableProcessors;
}
}
public static class ClientSideRequestStatisticsSerializer extends StdSerializer<ClientSideRequestStatistics> {
private static final long serialVersionUID = -2746532297176812860L;
ClientSideRequestStatisticsSerializer() {
super(ClientSideRequestStatistics.class);
}
@Override
public void serialize(
ClientSideRequestStatistics statistics, JsonGenerator generator, SerializerProvider provider) throws
IOException {
generator.writeStartObject();
long requestLatency = statistics
.getDuration()
.toMillis();
generator.writeStringField("userAgent", Utils.getUserAgent());
generator.writeStringField("activityId", statistics.activityId);
generator.writeNumberField("requestLatencyInMs", requestLatency);
generator.writeStringField("requestStartTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestStartTimeUTC));
generator.writeStringField("requestEndTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestEndTimeUTC));
generator.writeObjectField("responseStatisticsList", statistics.responseStatisticsList);
generator.writeObjectField("supplementalResponseStatisticsList", getCappedSupplementalResponseStatisticsList(statistics.supplementalResponseStatisticsList));
generator.writeObjectField("addressResolutionStatistics", statistics.addressResolutionStatistics);
generator.writeObjectField("regionsContacted", statistics.regionsContacted);
generator.writeObjectField("retryContext", statistics.retryContext);
generator.writeObjectField("metadataDiagnosticsContext", statistics.getMetadataDiagnosticsContext());
generator.writeObjectField("serializationDiagnosticsContext", statistics.getSerializationDiagnosticsContext());
generator.writeObjectField("gatewayStatistics", statistics.gatewayStatistics);
try {
SystemInformation systemInformation = fetchSystemInformation();
generator.writeObjectField("systemInformation", systemInformation);
} catch (Exception e) {
}
generator.writeObjectField("clientCfgs", statistics.diagnosticsClientConfig);
generator.writeEndObject();
}
}
public static List<StoreResponseStatistics> getCappedSupplementalResponseStatisticsList(List<StoreResponseStatistics> supplementalResponseStatisticsList) {
int supplementalResponseStatisticsListCount = supplementalResponseStatisticsList.size();
int initialIndex =
Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0);
if (initialIndex != 0) {
List<StoreResponseStatistics> subList = supplementalResponseStatisticsList
.subList(initialIndex,
supplementalResponseStatisticsListCount);
return subList;
}
return supplementalResponseStatisticsList;
}
public static class AddressResolutionStatistics {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant endTimeUTC;
@JsonSerialize
private String targetEndpoint;
@JsonSerialize
private String exceptionMessage;
@JsonSerialize
private boolean forceRefresh;
@JsonSerialize
private boolean forceCollectionRoutingMapRefresh;
@JsonSerialize
private boolean inflightRequest = true;
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
public String getTargetEndpoint() {
return targetEndpoint;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public boolean isInflightRequest() {
return inflightRequest;
}
public boolean isForceRefresh() {
return forceRefresh;
}
public boolean isForceCollectionRoutingMapRefresh() {
return forceCollectionRoutingMapRefresh;
}
}
public static class GatewayStatistics {
private String sessionToken;
private OperationType operationType;
private ResourceType resourceType;
private int statusCode;
private int subStatusCode;
private double requestCharge;
private RequestTimeline requestTimeline;
private String partitionKeyRangeId;
private String exceptionMessage;
private String exceptionResponseHeaders;
public String getSessionToken() {
return sessionToken;
}
public OperationType getOperationType() {
return operationType;
}
public int getStatusCode() {
return statusCode;
}
public int getSubStatusCode() {
return subStatusCode;
}
public double getRequestCharge() {
return requestCharge;
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
public ResourceType getResourceType() {
return resourceType;
}
public String getPartitionKeyRangeId() {
return partitionKeyRangeId;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public String getExceptionResponseHeaders() {
return exceptionResponseHeaders;
}
}
public static SystemInformation fetchSystemInformation() {
SystemInformation systemInformation = new SystemInformation();
Runtime runtime = Runtime.getRuntime();
long totalMemory = runtime.totalMemory() / 1024;
long freeMemory = runtime.freeMemory() / 1024;
long maxMemory = runtime.maxMemory() / 1024;
systemInformation.usedMemory = totalMemory - freeMemory + " KB";
systemInformation.availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";
systemInformation.availableProcessors = runtime.availableProcessors();
systemInformation.systemCpuLoad = CpuMemoryMonitor
.getCpuLoad()
.toString();
return systemInformation;
}
} | class ClientSideRequestStatistics {
private static final int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10;
private final DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig;
private String activityId;
private List<StoreResponseStatistics> responseStatisticsList;
private List<StoreResponseStatistics> supplementalResponseStatisticsList;
private Map<String, AddressResolutionStatistics> addressResolutionStatistics;
private List<URI> contactedReplicas;
private Set<URI> failedReplicas;
private Instant requestStartTimeUTC;
private Instant requestEndTimeUTC;
private Set<String> regionsContacted;
private Set<URI> locationEndpointsContacted;
private RetryContext retryContext;
private GatewayStatistics gatewayStatistics;
private MetadataDiagnosticsContext metadataDiagnosticsContext;
private SerializationDiagnosticsContext serializationDiagnosticsContext;
public ClientSideRequestStatistics(DiagnosticsClientContext diagnosticsClientContext) {
this.diagnosticsClientConfig = diagnosticsClientContext.getConfig();
this.requestStartTimeUTC = Instant.now();
this.requestEndTimeUTC = Instant.now();
this.responseStatisticsList = new ArrayList<>();
this.supplementalResponseStatisticsList = new ArrayList<>();
this.addressResolutionStatistics = new HashMap<>();
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>());
this.failedReplicas = Collections.synchronizedSet(new HashSet<>());
this.regionsContacted = Collections.synchronizedSet(new HashSet<>());
this.locationEndpointsContacted = Collections.synchronizedSet(new HashSet<>());
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext();
this.serializationDiagnosticsContext = new SerializationDiagnosticsContext();
this.retryContext = new RetryContext();
}
public ClientSideRequestStatistics(ClientSideRequestStatistics toBeCloned) {
this.diagnosticsClientConfig = toBeCloned.diagnosticsClientConfig;
this.requestStartTimeUTC = toBeCloned.requestStartTimeUTC;
this.requestEndTimeUTC = toBeCloned.requestEndTimeUTC;
this.responseStatisticsList = new ArrayList<>(toBeCloned.responseStatisticsList);
this.supplementalResponseStatisticsList = new ArrayList<>(toBeCloned.supplementalResponseStatisticsList);
this.addressResolutionStatistics = new HashMap<>(toBeCloned.addressResolutionStatistics);
this.contactedReplicas = Collections.synchronizedList(new ArrayList<>(toBeCloned.contactedReplicas));
this.failedReplicas = Collections.synchronizedSet(new HashSet<>(toBeCloned.failedReplicas));
this.regionsContacted = Collections.synchronizedSet(new HashSet<>(toBeCloned.regionsContacted));
this.locationEndpointsContacted = Collections.synchronizedSet(
new HashSet<>(toBeCloned.locationEndpointsContacted));
this.metadataDiagnosticsContext = new MetadataDiagnosticsContext(toBeCloned.metadataDiagnosticsContext);
this.serializationDiagnosticsContext =
new SerializationDiagnosticsContext(toBeCloned.serializationDiagnosticsContext);
this.retryContext = new RetryContext(toBeCloned.retryContext);
}
public Duration getDuration() {
return Duration.between(requestStartTimeUTC, requestEndTimeUTC);
}
public Instant getRequestStartTimeUTC() {
return requestStartTimeUTC;
}
public DiagnosticsClientContext.DiagnosticsClientConfig getDiagnosticsClientConfig() {
return diagnosticsClientConfig;
}
public void recordGatewayResponse(
RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponseDiagnostics storeResponseDiagnostics, GlobalEndpointManager globalEndpointManager) {
Instant responseTime = Instant.now();
synchronized (this) {
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
URI locationEndPoint = null;
if (rxDocumentServiceRequest != null && rxDocumentServiceRequest.requestContext != null) {
locationEndPoint = rxDocumentServiceRequest.requestContext.locationEndpointToRoute;
}
this.recordRetryContextEndTime();
if (locationEndPoint != null) {
this.regionsContacted.add(globalEndpointManager.getRegionName(locationEndPoint, rxDocumentServiceRequest.getOperationType()));
this.locationEndpointsContacted.add(locationEndPoint);
}
this.gatewayStatistics = new GatewayStatistics();
if (rxDocumentServiceRequest != null) {
this.gatewayStatistics.operationType = rxDocumentServiceRequest.getOperationType();
this.gatewayStatistics.resourceType = rxDocumentServiceRequest.getResourceType();
}
this.gatewayStatistics.statusCode = storeResponseDiagnostics.getStatusCode();
this.gatewayStatistics.subStatusCode = storeResponseDiagnostics.getSubStatusCode();
this.gatewayStatistics.sessionToken = storeResponseDiagnostics.getSessionTokenAsString();
this.gatewayStatistics.requestCharge = storeResponseDiagnostics.getRequestCharge();
this.gatewayStatistics.requestTimeline = storeResponseDiagnostics.getRequestTimeline();
this.gatewayStatistics.partitionKeyRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
this.gatewayStatistics.exceptionMessage = storeResponseDiagnostics.getExceptionMessage();
this.gatewayStatistics.exceptionResponseHeaders = storeResponseDiagnostics.getExceptionResponseHeaders();
this.activityId = storeResponseDiagnostics.getActivityId();
}
}
public String recordAddressResolutionStart(
URI targetEndpoint,
boolean forceRefresh,
boolean forceCollectionRoutingMapRefresh) {
String identifier = Utils
.randomUUID()
.toString();
AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics();
resolutionStatistics.startTimeUTC = Instant.now();
resolutionStatistics.endTimeUTC = null;
resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString();
resolutionStatistics.forceRefresh = forceRefresh;
resolutionStatistics.forceCollectionRoutingMapRefresh = forceCollectionRoutingMapRefresh;
synchronized (this) {
this.addressResolutionStatistics.put(identifier, resolutionStatistics);
}
return identifier;
}
public void recordAddressResolutionEnd(String identifier, String exceptionMessage) {
if (StringUtils.isEmpty(identifier)) {
return;
}
Instant responseTime = Instant.now();
synchronized (this) {
if (!this.addressResolutionStatistics.containsKey(identifier)) {
throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start "
+ "before calling end");
}
if (responseTime.isAfter(this.requestEndTimeUTC)) {
this.requestEndTimeUTC = responseTime;
}
AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier);
resolutionStatistics.endTimeUTC = responseTime;
resolutionStatistics.exceptionMessage = exceptionMessage;
resolutionStatistics.inflightRequest = false;
}
}
public List<URI> getContactedReplicas() {
return contactedReplicas;
}
public void setContactedReplicas(List<URI> contactedReplicas) {
this.contactedReplicas = Collections.synchronizedList(contactedReplicas);
}
public Set<URI> getFailedReplicas() {
return failedReplicas;
}
public void setFailedReplicas(Set<URI> failedReplicas) {
this.failedReplicas = Collections.synchronizedSet(failedReplicas);
}
public Set<String> getContactedRegionNames() {
return regionsContacted;
}
public void setRegionsContacted(Set<String> regionsContacted) {
this.regionsContacted = Collections.synchronizedSet(regionsContacted);
}
public Set<URI> getLocationEndpointsContacted() {
return locationEndpointsContacted;
}
public void setLocationEndpointsContacted(Set<URI> locationEndpointsContacted) {
this.locationEndpointsContacted = locationEndpointsContacted;
}
public MetadataDiagnosticsContext getMetadataDiagnosticsContext(){
return this.metadataDiagnosticsContext;
}
public SerializationDiagnosticsContext getSerializationDiagnosticsContext() {
return this.serializationDiagnosticsContext;
}
public void recordRetryContextEndTime() {
this.retryContext.updateEndTime();
}
public RetryContext getRetryContext() {
return retryContext;
}
public List<StoreResponseStatistics> getResponseStatisticsList() {
return responseStatisticsList;
}
public List<StoreResponseStatistics> getSupplementalResponseStatisticsList() {
return supplementalResponseStatisticsList;
}
public Map<String, AddressResolutionStatistics> getAddressResolutionStatistics() {
return addressResolutionStatistics;
}
public GatewayStatistics getGatewayStatistics() {
return gatewayStatistics;
}
public static class StoreResponseStatistics {
@JsonSerialize(using = StoreResultDiagnostics.StoreResultDiagnosticsSerializer.class)
private StoreResultDiagnostics storeResult;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant requestResponseTimeUTC;
@JsonSerialize
private ResourceType requestResourceType;
@JsonSerialize
private OperationType requestOperationType;
@JsonSerialize
private String requestSessionToken;
public StoreResultDiagnostics getStoreResult() {
return storeResult;
}
public Instant getRequestResponseTimeUTC() {
return requestResponseTimeUTC;
}
public ResourceType getRequestResourceType() {
return requestResourceType;
}
public OperationType getRequestOperationType() {
return requestOperationType;
}
public String getRequestSessionToken() { return requestSessionToken; }
}
public static class SystemInformation {
private String usedMemory;
private String availableMemory;
private String systemCpuLoad;
private int availableProcessors;
public String getUsedMemory() {
return usedMemory;
}
public String getAvailableMemory() {
return availableMemory;
}
public String getSystemCpuLoad() {
return systemCpuLoad;
}
public int getAvailableProcessors() {
return availableProcessors;
}
}
public static class ClientSideRequestStatisticsSerializer extends StdSerializer<ClientSideRequestStatistics> {
private static final long serialVersionUID = -2746532297176812860L;
ClientSideRequestStatisticsSerializer() {
super(ClientSideRequestStatistics.class);
}
@Override
public void serialize(
ClientSideRequestStatistics statistics, JsonGenerator generator, SerializerProvider provider) throws
IOException {
generator.writeStartObject();
long requestLatency = statistics
.getDuration()
.toMillis();
generator.writeStringField("userAgent", Utils.getUserAgent());
generator.writeStringField("activityId", statistics.activityId);
generator.writeNumberField("requestLatencyInMs", requestLatency);
generator.writeStringField("requestStartTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestStartTimeUTC));
generator.writeStringField("requestEndTimeUTC", DiagnosticsInstantSerializer.fromInstant(statistics.requestEndTimeUTC));
generator.writeObjectField("responseStatisticsList", statistics.responseStatisticsList);
generator.writeObjectField("supplementalResponseStatisticsList", getCappedSupplementalResponseStatisticsList(statistics.supplementalResponseStatisticsList));
generator.writeObjectField("addressResolutionStatistics", statistics.addressResolutionStatistics);
generator.writeObjectField("regionsContacted", statistics.regionsContacted);
generator.writeObjectField("retryContext", statistics.retryContext);
generator.writeObjectField("metadataDiagnosticsContext", statistics.getMetadataDiagnosticsContext());
generator.writeObjectField("serializationDiagnosticsContext", statistics.getSerializationDiagnosticsContext());
generator.writeObjectField("gatewayStatistics", statistics.gatewayStatistics);
try {
SystemInformation systemInformation = fetchSystemInformation();
generator.writeObjectField("systemInformation", systemInformation);
} catch (Exception e) {
}
generator.writeObjectField("clientCfgs", statistics.diagnosticsClientConfig);
generator.writeEndObject();
}
}
public static List<StoreResponseStatistics> getCappedSupplementalResponseStatisticsList(List<StoreResponseStatistics> supplementalResponseStatisticsList) {
int supplementalResponseStatisticsListCount = supplementalResponseStatisticsList.size();
int initialIndex =
Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0);
if (initialIndex != 0) {
List<StoreResponseStatistics> subList = supplementalResponseStatisticsList
.subList(initialIndex,
supplementalResponseStatisticsListCount);
return subList;
}
return supplementalResponseStatisticsList;
}
public static class AddressResolutionStatistics {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private Instant endTimeUTC;
@JsonSerialize
private String targetEndpoint;
@JsonSerialize
private String exceptionMessage;
@JsonSerialize
private boolean forceRefresh;
@JsonSerialize
private boolean forceCollectionRoutingMapRefresh;
@JsonSerialize
private boolean inflightRequest = true;
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
public String getTargetEndpoint() {
return targetEndpoint;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public boolean isInflightRequest() {
return inflightRequest;
}
public boolean isForceRefresh() {
return forceRefresh;
}
public boolean isForceCollectionRoutingMapRefresh() {
return forceCollectionRoutingMapRefresh;
}
}
public static class GatewayStatistics {
private String sessionToken;
private OperationType operationType;
private ResourceType resourceType;
private int statusCode;
private int subStatusCode;
private double requestCharge;
private RequestTimeline requestTimeline;
private String partitionKeyRangeId;
private String exceptionMessage;
private String exceptionResponseHeaders;
public String getSessionToken() {
return sessionToken;
}
public OperationType getOperationType() {
return operationType;
}
public int getStatusCode() {
return statusCode;
}
public int getSubStatusCode() {
return subStatusCode;
}
public double getRequestCharge() {
return requestCharge;
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
public ResourceType getResourceType() {
return resourceType;
}
public String getPartitionKeyRangeId() {
return partitionKeyRangeId;
}
public String getExceptionMessage() {
return exceptionMessage;
}
public String getExceptionResponseHeaders() {
return exceptionResponseHeaders;
}
}
public static SystemInformation fetchSystemInformation() {
SystemInformation systemInformation = new SystemInformation();
Runtime runtime = Runtime.getRuntime();
long totalMemory = runtime.totalMemory() / 1024;
long freeMemory = runtime.freeMemory() / 1024;
long maxMemory = runtime.maxMemory() / 1024;
systemInformation.usedMemory = totalMemory - freeMemory + " KB";
systemInformation.availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";
systemInformation.availableProcessors = runtime.availableProcessors();
systemInformation.systemCpuLoad = CpuMemoryMonitor
.getCpuLoad()
.toString();
return systemInformation;
}
} |
I tried this first but the JSON was created without `,` delimiters. | private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
} | .collect(Collectors.joining(","))); | private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Arrays.asList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
boolean allErrors = results.stream().allMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (allErrors) {
return new UploadLogsResult(UploadLogsStatus.FAILURE,
results.stream().flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
boolean anyErrors = results.stream().anyMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (anyErrors) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE,
results.stream().filter(result -> result.getStatus() == UploadLogsStatus.FAILURE)
.flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
/**
 * Compresses the given request payload with GZIP.
 * @param bytes the uncompressed request payload.
 * @return the GZIP-compressed bytes.
 */
private byte[] gzipRequest(byte[] bytes) {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gzipStream = new GZIPOutputStream(compressed)) {
        gzipStream.write(bytes);
    } catch (IOException ioException) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(ioException));
    }
    return compressed.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
// Header name used to signal that the request body is compressed.
private static final String CONTENT_ENCODING = "Content-Encoding";
// Maximum accumulated size (1 MiB) of raw serialized logs per request; larger inputs are split.
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
// Value for the Content-Encoding header.
private static final String GZIP = "gzip";
// Shared JSON serializer used when the caller does not supply one via UploadLogsOptions.
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Generated low-level client that performs the actual service calls.
private final IngestionUsingDataCollectionRulesAsyncClient service;
/**
 * Creates a client that delegates all upload calls to the given generated ingestion client.
 * @param service the low-level client used to execute upload requests.
 */
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
    this.service = service;
}
/**
 * Uploads logs to Azure Monitor using the specified data collection rule id and stream name.
 * Inputs too large for a single request are transparently split into multiple smaller requests.
 * Delegates to the options-based overload with default {@link UploadLogsOptions}.
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule that defines the structure of the logs.
 * @param logs the collection of logs to be uploaded.
 * @return the result of the logs upload request.
 * @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
 * @throws IllegalArgumentException if {@code logs} is empty.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
    UploadLogsOptions defaultOptions = new UploadLogsOptions();
    return upload(dataCollectionRuleId, streamName, logs, defaultOptions);
}
/**
 * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
 * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
 * the input logs into multiple smaller requests before sending to the service.
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule that defines the structure of the
 * logs sent in this request.
 * @param logs the collection of logs to be uploaded.
 * @param options the options to configure the upload request.
 * @return the result of the logs upload request.
 * @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
 * @throws IllegalArgumentException if {@code logs} is empty.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
    List<Object> logs, UploadLogsOptions options) {
    // Capture the Reactor subscriber context so downstream service calls carry it.
    return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
// Package-private overload for context-aware callers (e.g. the sync client): defers the actual
// split-and-upload work until subscription time.
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
    List<Object> logs, UploadLogsOptions options,
    Context context) {
    return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Validates the upload arguments, splits {@code logs} into gzipped batches that fit the service's
 * payload limit, and uploads the batches with the requested concurrency, aggregating the per-batch
 * outcomes into a single {@link UploadLogsResult}.
 *
 * @param dataCollectionRuleId the data collection rule id; must not be null.
 * @param streamName the stream name configured in the data collection rule; must not be null.
 * @param logs the logs to upload; must not be null or empty.
 * @param options optional upload configuration (serializer, max concurrency); may be null.
 * @param context the Reactor context propagated to the service calls.
 * @return the aggregated result of all batch uploads.
 */
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        // Bug fix: the original checked 'dataCollectionRuleId' three times, leaving streamName and
        // logs unvalidated despite the documented @throws NullPointerException on the public API.
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        // Fall back to the shared JSON serializer and sequential uploads unless overridden.
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        // flatMapSequential preserves request order, so this iterator stays aligned with responses.
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Pairs one upload response with the batch of logs that produced it and converts the pair into an
 * {@link UploadLogsResult}. The iterator is advanced exactly once per call, so response order must
 * match the order in which batches were submitted.
 */
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
    // Always consume the batch that corresponds to this response, success or failure.
    List<Object> batchForThisResponse = logBatchesIterator.next();
    if (responseHolder.getStatus() != UploadLogsStatus.FAILURE) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsError error = new UploadLogsError(responseHolder.getResponseError(), batchForThisResponse);
    return new UploadLogsResult(responseHolder.getStatus(), Collections.singletonList(error));
}
/**
 * Sends one gzipped batch to the service and folds the outcome into an
 * {@link UploadLogsResponseHolder} instead of letting {@link HttpResponseException} propagate,
 * so sibling batches keep uploading even when one fails.
 */
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
    return service.uploadWithResponse(dataCollectionRuleId, streamName,
        BinaryData.fromBytes(bytes), requestOptions)
        .map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
        .onErrorResume(HttpResponseException.class,
            ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
                mapToResponseError(ex))));
}
/**
 * Extracts a {@link ResponseError} from the service error payload carried by the exception.
 * The payload is expected to look like {@code {"error": {"code": ..., "message": ...}}};
 * any deviation from that shape yields {@code null}.
 * @param ex the {@link HttpResponseException} raised by the service call.
 * @return the mapped {@link ResponseError}, or {@code null} if the payload shape is unexpected.
 */
private ResponseError mapToResponseError(HttpResponseException ex) {
    Object value = ex.getValue();
    if (!(value instanceof LinkedHashMap<?, ?>)) {
        return null;
    }
    @SuppressWarnings("unchecked")
    LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) value;
    Object error = errorMap.get("error");
    if (!(error instanceof LinkedHashMap<?, ?>)) {
        return null;
    }
    @SuppressWarnings("unchecked")
    LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
    if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
        return new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
    }
    return null;
}
/**
 * Aggregates per-batch results into one overall {@link UploadLogsResult}: SUCCESS when no batch
 * failed (errors stay {@code null}), PARTIAL_FAILURE when only some failed, FAILURE when all did.
 */
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
    List<UploadLogsError> aggregatedErrors = null;
    int failedBatches = 0;
    for (UploadLogsResult batchResult : results) {
        if (batchResult.getStatus() == UploadLogsStatus.SUCCESS) {
            continue;
        }
        failedBatches++;
        // Allocate the error list lazily so the all-success path stays allocation-free.
        if (aggregatedErrors == null) {
            aggregatedErrors = new ArrayList<>();
        }
        aggregatedErrors.addAll(batchResult.getErrors());
    }
    if (failedBatches == 0) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsStatus overallStatus = failedBatches < results.size()
        ? UploadLogsStatus.PARTIAL_FAILURE
        : UploadLogsStatus.FAILURE;
    return new UploadLogsResult(overallStatus, aggregatedErrors);
}
/**
 * Splits {@code logs} into JSON-array payloads whose raw serialized size stays under
 * {@code MAX_REQUEST_PAYLOAD_SIZE}, gzips each payload, and records the backing sub-list of
 * {@code logs} for each payload in {@code logBatches} (same order as the returned payloads),
 * so failed batches can be reported back in terms of the caller's original objects.
 *
 * @param logs the full collection of logs to upload.
 * @param serializer serializer used to convert each log object to JSON bytes.
 * @param logBatches out-parameter receiving one batch sub-list per returned payload.
 * @return the gzipped request payloads.
 */
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
    List<List<Object>> logBatches) {
    try {
        List<byte[]> requests = new ArrayList<>();
        long currentBatchSize = 0;
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
        generator.writeStartArray();
        List<String> serializedLogs = new ArrayList<>();
        int currentBatchStart = 0;
        for (int i = 0; i < logs.size(); i++) {
            byte[] bytes = serializer.serializeToBytes(logs.get(i));
            int currentLogSize = bytes.length;
            currentBatchSize += currentLogSize;
            if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
                // Log i would overflow the current batch: flush logs [currentBatchStart, i)
                // and restart the stream/generator with log i as the new batch's first entry.
                writeLogsAndCloseJsonGenerator(generator, serializedLogs);
                requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
                byteArrayOutputStream = new ByteArrayOutputStream();
                generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
                generator.writeStartArray();
                currentBatchSize = currentLogSize;
                serializedLogs.clear();
                logBatches.add(logs.subList(currentBatchStart, i));
                currentBatchStart = i;
            }
            serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
        }
        if (currentBatchSize > 0) {
            // Flush whatever remains as the final batch.
            writeLogsAndCloseJsonGenerator(generator, serializedLogs);
            requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
            logBatches.add(logs.subList(currentBatchStart, logs.size()));
        }
        return requests;
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
}
/**
 * GZIP-compresses a request payload.
 * @param bytes the raw (uncompressed) payload.
 * @return the compressed bytes.
 */
private byte[] gzipRequest(byte[] bytes) {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try (GZIPOutputStream gzip = new GZIPOutputStream(buffer)) {
        gzip.write(bytes);
    } catch (IOException ioe) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(ioe));
    }
    return buffer.toByteArray();
}
} |
Can we also add a case for a normal create-item operation, to show that the requestSessionToken will not be sent? It is also fine to move all of the requestSessionToken validation into a separate test method. | public void directDiagnostics() throws Exception {
CosmosClient testDirectClient = null;
try {
testDirectClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.consistencyLevel(ConsistencyLevel.SESSION)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
CosmosContainer cosmosContainer =
testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).contains("supplementalResponseStatisticsList");
assertThat(diagnostics).contains("\"gatewayStatistics\":null");
assertThat(diagnostics).contains("addressResolutionStatistics");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsAnyOf(
"\"machineId\":\"" + tempMachineId + "\"",
"\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\""
);
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(diagnostics).contains("\"requestSessionToken\"");
assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty();
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineDirect(diagnostics);
validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient());
isValidJSON(diagnostics);
String sessionToken = createResponse.getSessionToken();
CosmosItemResponse<InternalObjectNode> readResponse =
cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey(BridgeInternal.getProperties(createResponse).getId()),
InternalObjectNode.class);
diagnostics = readResponse.getDiagnostics().toString();
assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", sessionToken));
CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(
BridgeInternal.getProperties(createResponse).getId()));
batch.deleteItemOperation(BridgeInternal.getProperties(createResponse).getId());
batch.createItemOperation(internalObjectNode);
CosmosBatchResponse batchResponse = cosmosContainer.executeCosmosBatch(batch,
new CosmosBatchRequestOptions().setSessionToken("0:-1
diagnostics = batchResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"requestSessionToken\":\"0:-1
try {
cosmosContainer.createItem(internalObjectNode);
fail("expected 409");
} catch (CosmosException e) {
diagnostics = e.getDiagnostics().toString();
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\"");
assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\"");
validateTransportRequestTimelineDirect(e.getDiagnostics().toString());
}
} finally {
if (testDirectClient != null) {
testDirectClient.close();
}
}
} | public void directDiagnostics() throws Exception {
CosmosClient testDirectClient = null;
try {
testDirectClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
CosmosContainer cosmosContainer =
testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).contains("supplementalResponseStatisticsList");
assertThat(diagnostics).contains("\"gatewayStatistics\":null");
assertThat(diagnostics).contains("addressResolutionStatistics");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsAnyOf(
"\"machineId\":\"" + tempMachineId + "\"",
"\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\""
);
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty();
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineDirect(diagnostics);
validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient());
isValidJSON(diagnostics);
try {
cosmosContainer.createItem(internalObjectNode);
fail("expected 409");
} catch (CosmosException e) {
diagnostics = e.getDiagnostics().toString();
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\"");
assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\"");
validateTransportRequestTimelineDirect(e.getDiagnostics().toString());
}
} finally {
if (testDirectClient != null) {
testDirectClient.close();
}
}
} | class CosmosDiagnosticsTest extends TestSuiteBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
// Formatter available for parsing/validating ISO-instant timestamps found in diagnostics output.
private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT;
// Machine id captured once via reflection so diagnostics assertions can match either reported variant.
private static final String tempMachineId = getTempMachineId();
// Shared clients/containers initialized in beforeClass and released in afterClass.
private CosmosClient gatewayClient;
private CosmosClient directClient;
private CosmosAsyncDatabase cosmosAsyncDatabase;
private CosmosContainer container;
private CosmosAsyncContainer cosmosAsyncContainer;
/**
 * Reads {@code RxDocumentClientImpl}'s private static {@code tempMachineId} field via reflection;
 * the test fails immediately if the field is missing or unreadable.
 */
private static String getTempMachineId() {
    Field machineIdField = null;
    try {
        machineIdField = RxDocumentClientImpl.class.getDeclaredField("tempMachineId");
    } catch (NoSuchFieldException e) {
        fail(e.toString());
    }
    machineIdField.setAccessible(true);
    try {
        return (String) machineIdField.get(null);
    } catch (IllegalAccessException e) {
        fail(e.toString());
        return null;
    }
}
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void beforeClass() {
    // Guard against setup being invoked twice on the same test instance.
    assertThat(this.gatewayClient).isNull();
    gatewayClient = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true)
        .gatewayMode()
        .buildClient();
    directClient = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true)
        .directMode()
        .buildClient();
    // The shared multi-partition container is resolved through the gateway client; both clients
    // then address the same database/container by id.
    cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
    cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId());
    container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
}
/**
 * Releases both clients created in {@code beforeClass}; each close is null-guarded because setup
 * may have failed part-way through.
 */
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    // Close in the same order the clients were created.
    if (null != this.gatewayClient) {
        this.gatewayClient.close();
    }
    if (null != this.directClient) {
        this.directClient.close();
    }
}
/**
 * Rows are {query text, qmEnabled}; every query deliberately matches no documents.
 * NOTE(review): the last five rows repeat the preceding five (both with qmEnabled=false) —
 * confirm the repetition is intentional rather than a copy/paste leftover.
 */
@DataProvider(name = "query")
private Object[][] query() {
    return new Object[][]{
        new Object[] { "Select * from c where c.id = 'wrongId'", true },
        new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true },
        new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true },
        new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true },
        new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true },
        new Object[] { "Select * from c where c.id = 'wrongId'", false },
        new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
        new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
        new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
        new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
        new Object[] { "Select * from c where c.id = 'wrongId'", false },
        new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
        new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
        new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
        new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
    };
}
/**
 * Rows are {itemCount, qmEnabled}; a {@code null} qmEnabled leaves the query-metrics option at
 * its default rather than setting it explicitly.
 */
@DataProvider(name = "readAllItemsOfLogicalPartition")
private Object[][] readAllItemsOfLogicalPartition() {
    return new Object[][]{
        new Object[] { 1, true },
        new Object[] { 5, null },
        new Object[] { 20, null },
        new Object[] { 1, false },
        new Object[] { 5, false },
        new Object[] { 20, false },
    };
}
/**
 * Supplies the two connection-state-listener toggles: one run with the listener enabled,
 * one without.
 */
@DataProvider(name = "connectionStateListenerArgProvider")
public Object[][] connectionStateListenerArgProvider() {
    return new Object[][]{
        new Object[]{Boolean.TRUE},
        new Object[]{Boolean.FALSE}
    };
}
/**
 * Creates an item through a fresh gateway-mode client and asserts that the create-response
 * diagnostics carry the expected gateway-specific sections and metadata.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void gatewayDiagnostics() throws Exception {
    CosmosClient testGatewayClient = null;
    try {
        testGatewayClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .gatewayMode()
            .buildClient();
        // NOTE(review): fixed sleep after client creation — presumably a warm-up delay; confirm
        // why it is needed and whether it can be replaced with a deterministic wait.
        Thread.sleep(2000);
        CosmosContainer container =
            testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
        String diagnostics = createResponse.getDiagnostics().toString();
        // Gateway mode must populate gatewayStatistics and report the Create operation.
        assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
        assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
        assertThat(diagnostics).contains("\"operationType\":\"Create\"");
        assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
        assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        // The machine id may be either the reflected temp id or the telemetry-computed id.
        assertThat(diagnostics).containsAnyOf(
            "\"machineId\":\"" + tempMachineId + "\"",
            "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\""
        );
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
        assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
        validateTransportRequestTimelineGateway(diagnostics);
        validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient());
        isValidJSON(diagnostics);
    } finally {
        if (testGatewayClient != null) {
            testGatewayClient.close();
        }
    }
}
/**
 * Forces a 404 by reading an existing item with the wrong partition key and asserts that the
 * exception's diagnostics carry the expected gateway failure details.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void gatewayDiagnosticsOnException() throws Exception {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = null;
    try {
        createResponse = this.container.createItem(internalObjectNode);
        CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
        ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
        // Reading with a deliberately wrong partition key must raise a 404 CosmosException.
        CosmosItemResponse<InternalObjectNode> readResponse =
            this.container.readItem(BridgeInternal.getProperties(createResponse).getId(),
                new PartitionKey("wrongPartitionKey"),
                InternalObjectNode.class);
        fail("request should fail as partition key is wrong");
    } catch (CosmosException exception) {
        // Both the exception's string forms must embed valid JSON diagnostics.
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
        assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
        assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
        assertThat(diagnostics).contains("\"statusCode\":404");
        assertThat(diagnostics).contains("\"operationType\":\"Read\"");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        assertThat(diagnostics).contains("\"exceptionMessage\":\"Entity with the specified id does not exist in the system.");
        assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
        assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\"");
        assertThat(diagnostics).containsAnyOf(
            "\"machineId\":\"" + tempMachineId + "\"",
            "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\""
        );
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        // createResponse is non-null here: the create succeeded before the failing read.
        assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
        validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient());
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        validateTransportRequestTimelineGateway(diagnostics);
        isValidJSON(diagnostics);
    }
}
/**
 * Creates an item and verifies that the diagnostics include the system-state section
 * (memory and CPU figures) along with the user agent and an activity id.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void systemDiagnosticsForSystemStateInformation() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode);
    String diagnostics = createResponse.getDiagnostics().toString();
    assertThat(diagnostics)
        .contains("systemInformation")
        .contains("usedMemory")
        .contains("availableMemory")
        .contains("systemCpuLoad")
        .contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"")
        .containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
    assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
}
/**
 * Verifies that the query-plan section of the diagnostics (start/end time, duration, request
 * timeline) appears only on the first page of a query and is absent from subsequent pages.
 *
 * <p>Fix: the original code carried a duplicated {@code @Test} annotation, which does not compile
 * (TestNG's {@code @Test} is not repeatable); one copy was removed. The unused create-response
 * local was also dropped.</p>
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryPlanDiagnostics() throws JsonProcessingException {
    CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    List<String> itemIdList = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        // Only every 20th id is kept, for the IN-clause query below.
        cosmosContainer.createItem(internalObjectNode);
        if (i % 20 == 0) {
            itemIdList.add(internalObjectNode.getId());
        }
    }
    String queryDiagnostics = null;
    List<String> queryList = new ArrayList<>();
    queryList.add("Select * from c");
    // Build "SELECT * from c where c.mypk in ('id1','id2',...)" from the sampled ids.
    StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
    for (int i = 0; i < itemIdList.size(); i++) {
        queryBuilder.append("'").append(itemIdList.get(i)).append("'");
        if (i < (itemIdList.size() - 1)) {
            queryBuilder.append(",");
        } else {
            queryBuilder.append(")");
        }
    }
    queryList.add(queryBuilder.toString());
    queryList.add("Select * from c where c.id = 'wrongId'");
    for (String query : queryList) {
        int feedResponseCounter = 0;
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(true);
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            if (feedResponseCounter == 0) {
                // Only the first page triggers the query-plan request, so only it has timings.
                assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
                assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
                assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
                String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline());
                assertThat(requestTimeLine).contains("connectionConfigured");
                assertThat(requestTimeLine).contains("requestSent");
                assertThat(requestTimeLine).contains("transitTime");
                assertThat(requestTimeLine).contains("received");
            } else {
                assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
                assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
                assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
                assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline =");
            }
            feedResponseCounter++;
        }
    }
}
/**
 * Runs an IN-clause query with query metrics and index metrics enabled, and — when the service
 * returns the index-utilization header — verifies that the header decodes into non-null
 * utilized-single-index information.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithIndexMetrics() {
    CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    List<String> itemIdList = new ArrayList<>();
    // Seed 100 items; keep every 20th id for the query below.
    for(int i = 0; i< 100; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        // NOTE(review): the create response is unused here — the assignment could be dropped.
        CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
        if(i%20 == 0) {
            itemIdList.add(internalObjectNode.getId());
        }
    }
    String queryDiagnostics = null;
    List<String> queryList = new ArrayList<>();
    // Build "SELECT * from c where c.mypk in ('id1','id2',...)" from the sampled ids.
    StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
    for(int i = 0 ; i < itemIdList.size(); i++){
        queryBuilder.append("'").append(itemIdList.get(i)).append("'");
        if(i < (itemIdList.size()-1)) {
            queryBuilder.append(",");
        } else {
            queryBuilder.append(")");
        }
    }
    queryList.add(queryBuilder.toString());
    for (String query : queryList) {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(true);
        options.setIndexMetricsEnabled(true);
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            logger.info("This is query diagnostics {}", queryDiagnostics);
            // The index-utilization header is base64-encoded JSON; decode and sanity-check it.
            if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) {
                assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull();
                assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull();
            }
        }
    }
}
/**
 * Runs each provider query in direct mode and validates the diagnostics of every page; for
 * "group by" queries only the first page is validated (later group-by pages omit the full
 * diagnostics), matching the original behavior.
 */
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
public void queryMetrics(String query, Boolean qmEnabled) {
    CosmosContainer directContainer =
        this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId())
            .getContainer(this.cosmosAsyncContainer.getId());
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    if (qmEnabled != null) {
        options.setQueryMetricsEnabled(qmEnabled);
    }
    boolean groupByFirstResponse = true;
    Iterator<FeedResponse<InternalObjectNode>> pages = directContainer.queryItems(query, options,
        InternalObjectNode.class).iterableByPage().iterator();
    assertThat(pages.hasNext()).isTrue();
    while (pages.hasNext()) {
        FeedResponse<InternalObjectNode> page = pages.next();
        String pageDiagnostics = page.getCosmosDiagnostics().toString();
        // Every provider query targets 'wrongId', so each page must be empty.
        assertThat(page.getResults().size()).isEqualTo(0);
        boolean isGroupByQuery = query.contains("group by");
        if (!isGroupByQuery || groupByFirstResponse) {
            validateQueryDiagnostics(pageDiagnostics, qmEnabled, true);
            validateDirectModeQueryDiagnostics(pageDiagnostics);
            if (isGroupByQuery) {
                groupByFirstResponse = false;
            }
        }
    }
}
/**
 * Runs a cross-partition ORDER BY query against a 40K-RU container (multiple physical partitions)
 * and asserts that the set of partition-key-range ids reported in the page diagnostics matches the
 * set of pkrId markers found in the same diagnostics text.
 *
 * NOTE(review): both regexes capture only a single digit of the range id; multi-digit ids would be
 * truncated — the equality assertion still holds because both sides truncate identically, but
 * confirm the capture group should not be (\d+).
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryDiagnosticsOnOrderBy() {
    String containerId = "testcontainer";
    cosmosAsyncDatabase.createContainer(containerId, "/mypk",
        ThroughputProperties.createManualThroughput(40000)).block();
    CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
    testcontainer.createItem(getInternalObjectNode()).block();
    // -1 lets the SDK pick the degree of parallelism for the cross-partition query.
    options.setMaxDegreeOfParallelism(-1);
    String query = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options,
        InternalObjectNode.class);
    Set<String> partitionKeyRangeIds = new HashSet<>();
    Set<String> pkRids = new HashSet<>();
    cosmosPagedFlux.byPage().flatMap(feedResponse -> {
        String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
        // Collect the first digit after each "partitionKeyRangeId":" marker.
        Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
        Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            String group = matcher.group(2);
            partitionKeyRangeIds.add(group);
        }
        // Collect the first digit after each "pkrId:" marker.
        pattern = Pattern.compile("(pkrId:)(\\d)");
        matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            String group = matcher.group(2);
            pkRids.add(group);
        }
        return Flux.just(feedResponse);
    }).blockLast();
    assertThat(pkRids).isNotEmpty();
    assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
    deleteCollection(testcontainer);
}
/**
 * Asserts the direct-mode markers that must appear in a page's query diagnostics.
 */
private void validateDirectModeQueryDiagnostics(String diagnostics) {
    assertThat(diagnostics)
        .contains("\"connectionMode\":\"DIRECT\"")
        .contains("supplementalResponseStatisticsList")
        .contains("responseStatisticsList")
        .contains("\"gatewayStatistics\":null")
        .contains("addressResolutionStatistics")
        .contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"")
        .containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
/**
 * Asserts that query diagnostics produced in gateway (HTTPS) mode carry populated
 * gateway statistics, the query operation type, an activity id and contacted regions.
 */
private void validateGatewayModeQueryDiagnostics(String diagnostics) {
    assertThat(diagnostics)
        .contains("\"connectionMode\":\"GATEWAY\"")
        .doesNotContain("\"gatewayStatistics\":null")
        .contains("\"operationType\":\"Query\"")
        .contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"")
        .containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*")
        .contains("\"regionsContacted\"");
}
/**
 * Runs each data-provider query through a dedicated gateway-mode client and
 * validates the query diagnostics of every (empty) result page.
 *
 * @param query the query to execute; all target a non-existent id
 * @param qmEnabled query-metrics toggle; null leaves the SDK default
 */
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2)
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) {
    // Dedicated gateway-mode client so diagnostics reflect GATEWAY connectivity.
    // (The original named this "testDirectClient" and never closed it.)
    CosmosClient testGatewayClient = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true)
        .gatewayMode()
        .buildClient();
    try {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosContainer cosmosContainer = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId())
            .getContainer(cosmosAsyncContainer.getId());
        // Seed 100 items; every 20th id is remembered (kept for parity with the
        // original, though the list is not consumed afterwards).
        List<String> itemIdList = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            cosmosContainer.createItem(internalObjectNode);
            if (i % 20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        // Only the first page of a "group by" query carries the full diagnostics.
        boolean groupByFirstResponse = true;
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer
            .queryItems(query, options, InternalObjectNode.class)
            .iterableByPage()
            .iterator();
        assertThat(iterator.hasNext()).isTrue();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            // Every query targets id 'wrongId', so all pages are empty.
            assertThat(feedResponse.getResults().size()).isEqualTo(0);
            if (!query.contains("group by") || groupByFirstResponse) {
                validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
                validateGatewayModeQueryDiagnostics(queryDiagnostics);
                if (query.contains("group by")) {
                    groupByFirstResponse = false;
                }
            }
        }
    } finally {
        // The original implementation leaked this client.
        testGatewayClient.close();
    }
}
/**
 * Regression test: query-metrics parsing must not depend on the JVM default
 * locale (German uses ',' as the decimal separator, which used to break
 * numeric parsing of metrics).
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithADifferentLocale() {
    Locale previousDefault = Locale.getDefault();
    Locale.setDefault(Locale.GERMAN);
    try {
        String query = "select * from root where root.id= \"someid\"";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options,
            InternalObjectNode.class)
            .iterableByPage().iterator();
        double requestCharge = 0;
        while (iterator.hasNext()) {
            requestCharge += iterator.next().getRequestCharge();
        }
        assertThat(requestCharge).isGreaterThan(0);
    } finally {
        // Restore the JVM-wide default locale even when the test fails; the
        // original only reset it on success (and to Locale.ROOT rather than the
        // pre-test default), leaking the German locale into later tests.
        Locale.setDefault(previousDefault);
    }
}
/**
 * Validates presence or absence of the query-metrics and query-plan sections in
 * a diagnostics string.
 *
 * @param queryDiagnostics diagnostics of a single feed response
 * @param qmEnabled query-metrics toggle; null (SDK default) behaves as enabled
 * @param expectQueryPlanDiagnostics whether query-plan timing entries must be present
 */
private static void validateQueryDiagnostics(
    String queryDiagnostics,
    Boolean qmEnabled,
    boolean expectQueryPlanDiagnostics) {
    boolean metricsExpected = qmEnabled == null || qmEnabled;
    String[] metricFragments = {
        "Retrieved Document Count",
        "Query Preparation Times",
        "Runtime Execution Times",
        "Partition Execution Timeline"
    };
    for (String fragment : metricFragments) {
        if (metricsExpected) {
            assertThat(queryDiagnostics).contains(fragment);
        } else {
            assertThat(queryDiagnostics).doesNotContain(fragment);
        }
    }
    String[] planFragments = {
        "QueryPlan Start Time (UTC)=",
        "QueryPlan End Time (UTC)=",
        "QueryPlan Duration (ms)="
    };
    for (String fragment : planFragments) {
        if (expectQueryPlanDiagnostics) {
            assertThat(queryDiagnostics).contains(fragment);
        } else {
            assertThat(queryDiagnostics).doesNotContain(fragment);
        }
    }
}
/**
 * Reads all items of a single logical partition page by page and verifies the
 * per-page diagnostics honor the query-metrics flag; no query plan is expected
 * for a logical-partition readAll.
 *
 * @param expectedItemCount number of items seeded into the partition
 * @param qmEnabled query-metrics toggle; null leaves the SDK default
 */
@Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT)
public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) {
    String pkValue = UUID.randomUUID().toString();
    for (int i = 0; i < expectedItemCount; i++) {
        // (Unused response local removed from the original.)
        container.createItem(getInternalObjectNode(pkValue));
    }
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    if (qmEnabled != null) {
        options = options.setQueryMetricsEnabled(qmEnabled);
    }
    // Page size of 5 forces multiple pages for counts greater than 5.
    ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5);
    Iterator<FeedResponse<InternalObjectNode>> iterator =
        this.container
            .readAllItems(
                new PartitionKey(pkValue),
                options,
                InternalObjectNode.class)
            .iterableByPage().iterator();
    assertThat(iterator.hasNext()).isTrue();
    int actualItemCount = 0;
    while (iterator.hasNext()) {
        FeedResponse<InternalObjectNode> feedResponse = iterator.next();
        String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
        actualItemCount += feedResponse.getResults().size();
        // readAllItems on a logical partition never issues a query-plan request.
        validateQueryDiagnostics(queryDiagnostics, qmEnabled, false);
    }
    assertThat(actualItemCount).isEqualTo(expectedItemCount);
}
/**
 * Verifies exception diagnostics in direct mode: reading an existing item with a
 * deliberately wrong partition key must surface a 404 whose diagnostics are valid
 * JSON and carry the direct-mode sections, contacted regions and backend latency.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnException() throws Exception {
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = null;
CosmosClient client = null;
try {
// Separate direct-mode client so its createItem diagnostics can be checked
// against its own location cache in validateRegionContacted below.
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
createResponse = container.createItem(internalObjectNode);
CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
// Read with a wrong partition key: the document cannot be located -> 404.
CosmosItemResponse<InternalObjectNode> readResponse =
cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey("wrongPartitionKey"),
InternalObjectNode.class);
fail("request should fail as partition key is wrong");
} catch (CosmosException exception) {
// Both the exception string and its message embed diagnostics; each must be JSON.
isValidJSON(exception.toString());
isValidJSON(exception.getMessage());
String diagnostics = exception.getDiagnostics().toString();
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty();
assertThat(exception.getDiagnostics().getDuration()).isNotNull();
assertThat(diagnostics).contains("\"backendLatencyInMs\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource Not Found.");
assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\":null");
isValidJSON(diagnostics);
validateTransportRequestTimelineDirect(diagnostics);
// NOTE(review): createResponse is null here if createItem itself threw; this
// would NPE and mask the original failure. Assumed to succeed in practice.
validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient());
} finally {
if (client != null) {
client.close();
}
}
}
/**
 * Verifies diagnostics when a metadata (DocumentCollection) request fails: the
 * gateway HTTP client is swapped for a mock that always errors with 400, so the
 * collection-resolution call fails and the resulting CosmosException diagnostics
 * must be valid JSON carrying the resource type and contacted regions.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnMetadataException() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        // Every gateway HTTP call now fails with a 400, so metadata resolution errors out.
        HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
        Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class)))
            .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest")));
        // (The original had a redundant double assignment "x = x = ..." here.)
        RxStoreModel rxGatewayStoreModel =
            ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper());
        ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient);
        container.createItem(internalObjectNode);
        fail("request should fail as bad request");
    } catch (CosmosException exception) {
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).contains("\"exceptionMessage\":\"TestBadRequest\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        // The failing request is the metadata lookup, not the document write.
        assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\"");
        assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        isValidJSON(diagnostics);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
/**
 * Verifies that the serialized supplementalResponseStatisticsList is capped at
 * the 10 most recent entries while the in-memory list keeps every recorded
 * response, and sanity-checks the fields of each serialized entry.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void supplementalResponseStatisticsList() throws Exception {
ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext());
// Record 15 Head responses: more than the serialization cap of 10.
for (int i = 0; i < 15; i++) {
RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null);
}
List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
ObjectMapper objectMapper = new ObjectMapper();
String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
JsonNode jsonNode = objectMapper.readTree(diagnostics);
ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
// In-memory list keeps all 15; the serialized form is truncated to 10.
assertThat(storeResponseStatistics.size()).isEqualTo(15);
assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10);
clearStoreResponseStatistics(clientSideRequestStatistics);
storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
assertThat(storeResponseStatistics.size()).isEqualTo(0);
// Below the cap, the serialized list must match the in-memory list exactly.
for (int i = 0; i < 7; i++) {
RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null);
}
storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
objectMapper = new ObjectMapper();
diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
jsonNode = objectMapper.readTree(diagnostics);
supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
assertThat(storeResponseStatistics.size()).isEqualTo(7);
assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
// Each serialized entry carries a store result, a recent UTC timestamp,
// the operation type and the session token.
for(JsonNode node : supplementalResponseStatisticsListNode) {
assertThat(node.get("storeResult").asText()).isNotNull();
String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText();
Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC));
// The recorded timestamp must be within 5 seconds of wall-clock now.
assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000);
assertThat(node.get("requestResponseTimeUTC")).isNotNull();
assertThat(node.get("requestOperationType")).isNotNull();
assertThat(node.get("requestSessionToken")).isNotNull();
}
}
/**
 * Verifies which serialization/deserialization steps are recorded in diagnostics
 * for database reads, container reads and item operations. Item deserialization
 * is lazy: the marker appears only after getItem() is actually called, so the
 * order of the assertions below is load-bearing.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void serializationOnVariousScenarios() {
CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
String diagnostics = cosmosDatabase.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
CosmosContainerResponse containerResponse = this.container.read();
diagnostics = containerResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
TestItem testItem = new TestItem();
testItem.id = "TestId";
testItem.mypk = "TestPk";
// No explicit partition key -> the SDK must serialize the item to extract it.
CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
testItem.id = "TestId2";
testItem.mypk = "TestPk";
// Explicit partition key -> no partition-key-fetch serialization, and the item
// has not been deserialized yet either.
itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
// getItem() triggers the lazy deserialization; only now the marker appears.
TestItem readTestItem = itemResponse.getItem();
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
InternalObjectNode properties = readItemResponse.getItem();
diagnostics = readItemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
/**
 * Verifies the RNTBD request/response length statistics recorded in diagnostics
 * for create (success and 409 conflict), read and delete operations.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdRequestResponseLengthStatistics() throws Exception {
TestItem testItem = new TestItem();
testItem.id = UUID.randomUUID().toString();
testItem.mypk = UUID.randomUUID().toString();
// Serialized item size = expected request payload length for the create calls.
int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
try {
// Creating the same item again must fail with 409 Conflict.
container.createItem(testItem);
fail("expected to fail due to 409");
} catch (CosmosException e) {
logger.info("Diagnostics are : {}", e.getDiagnostics());
String diagnostics = e.getDiagnostics().toString();
assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\"");
assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\"");
// Conflict responses carry no document payload -> expected response size 0.
validate(e.getDiagnostics(), testItemLength, 0);
}
// Reads send no payload; deletes send and receive none.
CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
validate(deleteItemResponse.getDiagnostics(), 0, 0);
}
/**
 * Exercises the RNTBD service-endpoint statistics: performs three spaced-out
 * upserts and checks the endpoint's created/lastRequest timestamps fall into the
 * expected windows. The Thread.sleep calls separate the operations so the
 * timestamp windows do not overlap; the captured Instants bracket each operation.
 *
 * @param connectionStateListenerEnabled toggles connection endpoint rediscovery
 */
@Test(groups = {"simple"}, dataProvider = "connectionStateListenerArgProvider", timeOut = TIMEOUT)
public void rntbdStatistics(boolean connectionStateListenerEnabled) throws Exception {
Instant beforeClientInitialization = Instant.now();
CosmosClient client1 = null;
try {
DirectConnectionConfig connectionConfig = DirectConnectionConfig.getDefaultConfig();
connectionConfig.setConnectionEndpointRediscoveryEnabled(connectionStateListenerEnabled);
client1 = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.directMode(connectionConfig)
.buildClient();
TestItem testItem = new TestItem();
testItem.id = UUID.randomUUID().toString();
testItem.mypk = UUID.randomUUID().toString();
int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
Thread.sleep(1000);
// Operation 1 lazily initializes the RNTBD service endpoint.
Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
Instant afterInitializingRntbdServiceEndpoint = Instant.now();
Thread.sleep(1000);
Instant beforeOperation2 = Instant.now();
CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
Instant afterOperation2 = Instant.now();
Thread.sleep(1000);
Instant beforeOperation3 = Instant.now();
CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
Instant afterOperation3 = Instant.now();
// Operation 3's diagnostics should report endpoint creation during op 1 and
// lastRequestTime during op 2 (the most recent *completed* request).
validateRntbdStatistics(operation3Response.getDiagnostics(),
beforeClientInitialization,
beforeInitializingRntbdServiceEndpoint,
afterInitializingRntbdServiceEndpoint,
beforeOperation2,
afterOperation2,
beforeOperation3,
afterOperation3,
connectionStateListenerEnabled);
CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
validate(deleteItemResponse.getDiagnostics(), 0, 0);
} finally {
LifeCycleUtils.closeQuietly(client1);
}
}
/**
 * Validates the serviceEndpointStatistics section of an RNTBD store result:
 * channel/request counters, the optional connection-state-listener metrics, and
 * that the endpoint's createdTime / lastRequestTime fall within the instants
 * captured around operations 1 and 2 (small millisecond slack is added to
 * tolerate clock granularity).
 */
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
Instant clientInitializationTime,
Instant beforeInitializingRntbdServiceEndpoint,
Instant afterInitializingRntbdServiceEndpoint,
Instant beforeOperation2,
Instant afterOperation2,
Instant beforeOperation3,
Instant afterOperation3,
boolean connectionStateListenerEnabled) throws Exception {
ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
assertThat(responseStatisticsList.isArray()).isTrue();
assertThat(responseStatisticsList.size()).isGreaterThan(0);
JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
assertThat(storeResult).isNotNull();
assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThanOrEqualTo(0);
assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
assertThat(serviceEndpointStatistics).isNotNull();
assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
// The current (in-flight) request is counted in inflightRequests, not acquiredChannels.
assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);
// cerMetrics is only emitted when the connection-state listener is enabled.
JsonNode connectionStateListenerMetrics = serviceEndpointStatistics.get("cerMetrics");
if (connectionStateListenerEnabled) {
assertThat(connectionStateListenerMetrics).isNotNull();
assertThat(connectionStateListenerMetrics.get("lastCallTimestamp")).isNull();
assertThat(connectionStateListenerMetrics.get("lastActionableContext")).isNull();
} else {
assertThat(connectionStateListenerMetrics).isNull();
}
// Endpoint creation must have happened during operation 1 (+/- slack ms).
Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
.isAfterOrEqualTo(beforeInitializationThreshold);
Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2);
assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
.isBeforeOrEqualTo(afterInitializationThreshold);
// The last *completed* request at snapshot time was operation 2.
Instant afterOperation2Threshold = afterOperation2.plusMillis(2);
Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2);
assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText()))
.isAfterOrEqualTo(beforeOperation2Threshold.toString())
.isBeforeOrEqualTo(afterOperation2Threshold.toString());
assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText()))
.isAfterOrEqualTo(beforeOperation2Threshold.toString())
.isBeforeOrEqualTo(afterOperation2Threshold.toString());
}
/**
 * Validates the payload-length statistics of the first store result in the
 * diagnostics: the bare request/response payload sizes match exactly, and the
 * wire-level RNTBD message sizes are strictly larger (headers, framing).
 *
 * @param cosmosDiagnostics diagnostics of the operation under test
 * @param expectedRequestPayloadSize exact serialized request body size in bytes
 * @param expectedResponsePayloadSize exact response body size; ignored for error responses
 */
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    // Null-check before dereferencing (the original checked after the first get()).
    assertThat(storeResult).isNotNull();
    // Error responses carry an exceptionMessage and no document payload.
    boolean hasPayload = storeResult.get("exceptionMessage") == null;
    // (A duplicated copy of this assertion was removed from the original.)
    assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize);
    if (hasPayload) {
        assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize);
    }
    assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize);
}
/**
 * Verifies addressResolutionStatistics in diagnostics for both the success case
 * and the retry-after-failure case: client2's address-cache HTTP client is
 * swapped (via reflection) for one pointing at a non-existent local proxy so
 * address resolution first fails with connection-refused, then a background
 * thread restores a working client after 5s so the read eventually succeeds.
 */
@Test(groups = {"emulator"}, timeOut = TIMEOUT)
public void addressResolutionStatistics() {
CosmosClient client1 = null;
CosmosClient client2 = null;
String databaseId = DatabaseForTest.generateId();
String containerId = UUID.randomUUID().toString();
CosmosDatabase cosmosDatabase = null;
CosmosContainer cosmosContainer = null;
try {
// client1: plain direct-mode client used to seed the database/container.
client1 = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
client1.createDatabase(databaseId);
cosmosDatabase = client1.getDatabase(databaseId);
cosmosDatabase.createContainer(containerId, "/mypk");
InternalObjectNode internalObjectNode = getInternalObjectNode();
cosmosContainer = cosmosDatabase.getContainer(containerId);
CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode);
// Happy path: address resolution succeeded without an exception.
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":null");
// client2: a fresh client whose address cache will be sabotaged below.
client2 = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
cosmosDatabase = client2.getDatabase(databaseId);
cosmosContainer = cosmosDatabase.getContainer(containerId);
// Dig out client2's GatewayAddressCache via reflection so its HTTP client
// can be replaced with one that targets an unreachable proxy.
AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient();
GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient,
"addressResolver", true);
@SuppressWarnings("rawtypes")
Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver,
"addressCacheByEndpoint",
true);
Object endpointCache = addressCacheByEndpoint.values().toArray()[0];
GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true);
HttpClient httpClient = httpClient(true);
FieldUtils.writeField(addressCache, "httpClient", httpClient, true);
// Restore a working HTTP client after 5 seconds so the SDK's address
// resolution retries eventually succeed.
new Thread(() -> {
try {
Thread.sleep(5000);
HttpClient httpClient1 = httpClient(false);
FieldUtils.writeField(addressCache, "httpClient", httpClient1, true);
} catch (Exception e) {
fail(e.getMessage());
}
}).start();
PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
CosmosItemResponse<InternalObjectNode> readResourceResponse =
cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(),
InternalObjectNode.class);
// The read succeeded, but its diagnostics must record the earlier
// connection-refused failure during address resolution.
assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":\"io.netty" +
".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
} catch (Exception ex) {
logger.error("Error in test addressResolutionStatistics", ex);
fail("This test should not throw exception " + ex);
} finally {
safeDeleteSyncDatabase(cosmosDatabase);
if (client1 != null) {
client1.close();
}
if (client2 != null) {
client2.close();
}
}
}
/**
 * Creates a test document whose id and partition key property "mypk" are both
 * set to the same freshly generated UUID.
 */
private InternalObjectNode getInternalObjectNode() {
    String uuid = UUID.randomUUID().toString();
    InternalObjectNode node = new InternalObjectNode();
    node.setId(uuid);
    BridgeInternal.setProperty(node, "mypk", uuid);
    return node;
}
/**
 * Creates a test document with a random UUID id and the given value for the
 * partition key property "mypk".
 *
 * @param pkValue partition key value to set on the document
 */
private InternalObjectNode getInternalObjectNode(String pkValue) {
    InternalObjectNode node = new InternalObjectNode();
    node.setId(UUID.randomUUID().toString());
    BridgeInternal.setProperty(node, "mypk", pkValue);
    return node;
}
/**
 * Reads the private supplementalResponseStatisticsList field of the given
 * statistics object via reflection.
 */
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field field = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    field.setAccessible(true);
    @SuppressWarnings({"unchecked"})
    List<ClientSideRequestStatistics.StoreResponseStatistics> statistics =
        (List<ClientSideRequestStatistics.StoreResponseStatistics>) field.get(requestStatistics);
    return statistics;
}
/**
 * Replaces the private supplementalResponseStatisticsList of the given
 * statistics object with a fresh empty list, via reflection.
 */
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field field = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    field.setAccessible(true);
    field.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}
/**
 * Asserts the gateway-mode transport timeline events are all present.
 */
private void validateTransportRequestTimelineGateway(String diagnostics) {
    for (String event : new String[] {"connectionConfigured", "requestSent", "transitTime", "received"}) {
        assertThat(diagnostics).contains("\"eventName\":\"" + event + "\"");
    }
}
/**
 * Asserts the direct-mode (RNTBD) transport timeline events and their timing
 * fields are all present. Note "decodeTime" deliberately has no closing quote
 * in the expected fragment, matching the original assertion.
 */
private void validateTransportRequestTimelineDirect(String diagnostics) {
    String[] fragments = {
        "\"eventName\":\"created\"",
        "\"eventName\":\"queued\"",
        "\"eventName\":\"channelAcquisitionStarted\"",
        "\"eventName\":\"pipelined\"",
        "\"eventName\":\"transitTime\"",
        "\"eventName\":\"decodeTime",
        "\"eventName\":\"received\"",
        "\"eventName\":\"completed\"",
        "\"startTimeUTC\"",
        "\"durationInMicroSec\""
    };
    for (String fragment : fragments) {
        assertThat(diagnostics).contains(fragment);
    }
}
/**
 * Fails the test if the given string is not well-formed JSON.
 *
 * @param json the candidate JSON text (typically a diagnostics string)
 */
public void isValidJSON(final String json) {
    // try-with-resources: the original never closed the JsonParser.
    try (JsonParser parser = new ObjectMapper().createParser(json)) {
        while (parser.nextToken() != null) {
            // Consume every token; malformed input throws from nextToken().
        }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}
/**
 * Builds an HttpClient for the address cache. When {@code fakeProxy} is true the
 * client is routed through localhost:8888 (expected to be unreachable) so address
 * resolution fails with connection-refused.
 */
private HttpClient httpClient(boolean fakeProxy) {
    HttpClientConfig httpClientConfig = new HttpClientConfig(new Configs());
    if (fakeProxy) {
        httpClientConfig = httpClientConfig
            .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
    }
    return HttpClient.createFixed(httpClientConfig);
}
/**
 * Deserializes an IndexUtilizationInfo from its JSON representation.
 *
 * @return the parsed object, or null (after logging) when the JSON is malformed
 */
private IndexUtilizationInfo createFromJSONString(String jsonString) {
    try {
        return new ObjectMapper().readValue(jsonString, IndexUtilizationInfo.class);
    } catch (JsonProcessingException e) {
        logger.error("Json not correctly formed ", e);
        return null;
    }
}
/**
 * Verifies that the diagnostics report exactly one contacted region and that it
 * matches (case-insensitively) the first available write region in the client's
 * location cache, which is reached via reflection on internal SDK classes.
 */
private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
RxDocumentClientImpl rxDocumentClient =
(RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);
// DatabaseAccountLocationsInfo is a private nested class, so it is loaded by
// name and its availableWriteEndpointByLocation map is read via reflection.
Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
locationInfoField.setAccessible(true);
Object locationInfo = locationInfoField.get(locationCache);
Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" +
".LocationCache$DatabaseAccountLocationsInfo");
Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField(
"availableWriteEndpointByLocation");
availableWriteEndpointByLocation.setAccessible(true);
@SuppressWarnings("unchecked")
Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo);
// First key = preferred write region; diagnostics normalize region names to lower case.
String regionName = map.keySet().iterator().next();
assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase());
}
/**
 * Minimal POJO used as a test document; "mypk" is the container's partition key
 * property. Public fields keep Jackson serialization trivial; the no-arg
 * constructor is required for deserialization.
 */
public static class TestItem {
public String id;
public String mypk;
public TestItem() {
}
}
} | class CosmosDiagnosticsTest extends TestSuiteBase {
// Shared Jackson mapper for parsing diagnostics strings in assertions.
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
// Format of "requestResponseTimeUTC" entries in serialized statistics.
private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT;
// Snapshot of RxDocumentClientImpl's private machine id, read once via reflection.
private static final String tempMachineId = getTempMachineId();
private CosmosClient gatewayClient;
private CosmosClient directClient;
// Async database handle obtained through the direct client in beforeClass.
private CosmosAsyncDatabase cosmosAsyncDatabase;
// Sync container handle obtained through the gateway client in beforeClass.
private CosmosContainer container;
private CosmosAsyncContainer cosmosAsyncContainer;
/**
 * Reads the private static field RxDocumentClientImpl.tempMachineId via
 * reflection. Fails the suite (throwing an assertion error) if the field is
 * missing or inaccessible; the trailing {@code return null} is unreachable in
 * that case but required by the compiler.
 */
private static String getTempMachineId() {
    try {
        Field field = RxDocumentClientImpl.class.getDeclaredField("tempMachineId");
        field.setAccessible(true);
        return (String) field.get(null);
    } catch (NoSuchFieldException | IllegalAccessException e) {
        fail(e.toString());
        return null;
    }
}
/**
 * Creates one gateway-mode and one direct-mode client and resolves the shared
 * multi-partition container through each; the suite's tests pick whichever
 * connectivity mode they exercise.
 */
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void beforeClass() {
// Guard against double initialization across groups.
assertThat(this.gatewayClient).isNull();
gatewayClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.gatewayMode()
.buildClient();
directClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
// Async handles go through the direct client; the sync container through gateway.
cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId());
container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
}
/**
 * Closes both clients created in beforeClass; tolerates partially-failed setup.
 */
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    for (CosmosClient client : new CosmosClient[]{this.gatewayClient, this.directClient}) {
        if (client != null) {
            client.close();
        }
    }
}
/**
 * Rows of {query text, queryMetricsEnabled flag}. Covers plain, TOP, ORDER BY,
 * GROUP BY and DISTINCT shapes; the full query set is emitted once with
 * metrics enabled and twice with metrics disabled, matching the original
 * literal table row-for-row.
 */
@DataProvider(name = "query")
private Object[][] query() {
    String[] queries = {
        "Select * from c where c.id = 'wrongId'",
        "Select top 1 * from c where c.id = 'wrongId'",
        "Select * from c where c.id = 'wrongId' order by c.id",
        "Select count(1) from c where c.id = 'wrongId' group by c.pk",
        "Select distinct c.pk from c where c.id = 'wrongId'"
    };
    Boolean[] qmFlags = {true, false, false};
    Object[][] rows = new Object[queries.length * qmFlags.length][];
    int rowIndex = 0;
    for (Boolean qmEnabled : qmFlags) {
        for (String query : queries) {
            rows[rowIndex++] = new Object[]{query, qmEnabled};
        }
    }
    return rows;
}
/**
 * Rows of {expected item count, queryMetricsEnabled flag}; a null flag means
 * the test leaves the SDK default in place.
 */
@DataProvider(name = "readAllItemsOfLogicalPartition")
private Object[][] readAllItemsOfLogicalPartition() {
    Object[][] rows = new Object[6][];
    rows[0] = new Object[]{1, true};
    rows[1] = new Object[]{5, null};
    rows[2] = new Object[]{20, null};
    rows[3] = new Object[]{1, false};
    rows[4] = new Object[]{5, false};
    rows[5] = new Object[]{20, false};
    return rows;
}
/**
 * Single boolean argument: whether connection endpoint rediscovery
 * (the connection state listener) is enabled.
 */
@DataProvider(name = "connectionStateListenerArgProvider")
public Object[][] connectionStateListenerArgProvider() {
    return new Object[][]{
        new Object[]{Boolean.TRUE},
        new Object[]{Boolean.FALSE}
    };
}
/**
 * Creates an item through a dedicated gateway-mode client and validates the
 * gateway-specific shape of the resulting diagnostics string: connection mode,
 * populated gateway statistics, metadata lookup, serialization type, user
 * agent, machine id, activity id, duration/regions, transport timeline, and
 * that the whole payload is valid JSON.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void gatewayDiagnostics() throws Exception {
    CosmosClient testGatewayClient = null;
    try {
        testGatewayClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .gatewayMode()
            .buildClient();
        // NOTE(review): presumably lets client background initialization settle
        // before the first request — confirm whether this sleep is still needed.
        Thread.sleep(2000);
        CosmosContainer container =
            testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
        String diagnostics = createResponse.getDiagnostics().toString();
        // Gateway mode must report GATEWAY and populated gatewayStatistics.
        assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
        assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
        assertThat(diagnostics).contains("\"operationType\":\"Create\"");
        assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
        assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        // machineId may come from either the reflective temp id or ClientTelemetry.
        assertThat(diagnostics).containsAnyOf(
            "\"machineId\":\"" + tempMachineId + "\"",
            "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\""
        );
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
        assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
        validateTransportRequestTimelineGateway(diagnostics);
        validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient());
        isValidJSON(diagnostics);
    } finally {
        if (testGatewayClient != null) {
            testGatewayClient.close();
        }
    }
}
/**
 * Forces a 404 by reading an existing item with a wrong partition key and
 * validates the diagnostics attached to the thrown CosmosException in
 * gateway mode (status code, exception message/headers, machine id,
 * activity id, resource address, contacted regions, transport timeline).
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void gatewayDiagnosticsOnException() throws Exception {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = null;
    try {
        createResponse = this.container.createItem(internalObjectNode);
        CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
        ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
        CosmosItemResponse<InternalObjectNode> readResponse =
            this.container.readItem(BridgeInternal.getProperties(createResponse).getId(),
                new PartitionKey("wrongPartitionKey"),
                InternalObjectNode.class);
        fail("request should fail as partition key is wrong");
    } catch (CosmosException exception) {
        // Both the exception string and its message must embed well-formed JSON.
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
        assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
        assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
        assertThat(diagnostics).contains("\"statusCode\":404");
        assertThat(diagnostics).contains("\"operationType\":\"Read\"");
        assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
        assertThat(diagnostics).contains("\"exceptionMessage\":\"Entity with the specified id does not exist in the system.");
        assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
        assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\"");
        assertThat(diagnostics).containsAnyOf(
            "\"machineId\":\"" + tempMachineId + "\"",
            "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\""
        );
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        // NOTE(review): if createItem itself threw, createResponse would still be
        // null here and the next line would NPE, masking the real failure —
        // confirm whether an assertThat(createResponse).isNotNull() guard is wanted.
        assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull();
        validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient());
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        validateTransportRequestTimelineGateway(diagnostics);
        isValidJSON(diagnostics);
    }
}
/**
 * Verifies a create response's diagnostics embed host/system state
 * (memory, CPU load) plus the user agent and an activity id.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void systemDiagnosticsForSystemStateInformation() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode);
    String diagnostics = createResponse.getDiagnostics().toString();
    String[] expectedFragments = {
        "systemInformation",
        "usedMemory",
        "availableMemory",
        "systemCpuLoad",
        "\"userAgent\":\"" + Utils.getUserAgent() + "\""
    };
    for (String fragment : expectedFragments) {
        assertThat(diagnostics).contains(fragment);
    }
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
    assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void requestSessionTokenDiagnostics() {
CosmosClient testSessionTokenClient = null;
try {
testSessionTokenClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.consistencyLevel(ConsistencyLevel.SESSION)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
CosmosContainer cosmosContainer =
testSessionTokenClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"requestSessionToken\":null");
String sessionToken = createResponse.getSessionToken();
CosmosItemResponse<InternalObjectNode> readResponse =
cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey(BridgeInternal.getProperties(createResponse).getId()),
InternalObjectNode.class);
diagnostics = readResponse.getDiagnostics().toString();
assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", sessionToken));
CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(
BridgeInternal.getProperties(createResponse).getId()));
internalObjectNode = getInternalObjectNode();
batch.createItemOperation(internalObjectNode);
CosmosBatchResponse batchResponse = cosmosContainer.executeCosmosBatch(batch,
new CosmosBatchRequestOptions().setSessionToken("0:-1
diagnostics = batchResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"requestSessionToken\":\"0:-1
} finally {
if (testSessionTokenClient != null) {
testSessionTokenClient.close();
}
}
}
/**
 * Seeds 100 documents, runs three query shapes (full scan, multi-value IN,
 * no-match point filter) and asserts that query-plan timing diagnostics and
 * the plan's request timeline appear only on the FIRST page of each query.
 *
 * @throws JsonProcessingException if the plan's request timeline cannot be serialized
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryPlanDiagnostics() throws JsonProcessingException {
    CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    List<String> itemIdList = new ArrayList<>();
    for(int i = 0; i< 100; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
        if(i%20 == 0) {
            // Keep every 20th id to build the IN(...) query below.
            itemIdList.add(internalObjectNode.getId());
        }
    }
    String queryDiagnostics = null;
    List<String> queryList = new ArrayList<>();
    queryList.add("Select * from c");
    // Build "SELECT * from c where c.mypk in ('id1','id2',...)".
    StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
    for(int i = 0 ; i < itemIdList.size(); i++){
        queryBuilder.append("'").append(itemIdList.get(i)).append("'");
        if(i < (itemIdList.size()-1)) {
            queryBuilder.append(",");
        } else {
            queryBuilder.append(")");
        }
    }
    queryList.add(queryBuilder.toString());
    queryList.add("Select * from c where c.id = 'wrongId'");
    for(String query : queryList) {
        int feedResponseCounter = 0;
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(true);
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            if (feedResponseCounter == 0) {
                // The plan is fetched once per query, so only page 0 reports it.
                assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
                assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
                assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
                String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline());
                assertThat(requestTimeLine).contains("connectionConfigured");
                assertThat(requestTimeLine).contains("requestSent");
                assertThat(requestTimeLine).contains("transitTime");
                assertThat(requestTimeLine).contains("received");
            } else {
                // Subsequent pages must NOT repeat plan diagnostics.
                assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
                assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
                assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
                assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline =");
            }
            feedResponseCounter++;
        }
    }
}
/**
 * Seeds 100 documents, runs an IN(...) query with index metrics enabled and,
 * whenever the service returns the index-utilization header, verifies it
 * decodes (base64 JSON) into a payload with utilized single indexes.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithIndexMetrics() {
    CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    List<String> itemIdList = new ArrayList<>();
    for(int i = 0; i< 100; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
        if(i%20 == 0) {
            // Keep every 20th id for the IN(...) query below.
            itemIdList.add(internalObjectNode.getId());
        }
    }
    String queryDiagnostics = null;
    List<String> queryList = new ArrayList<>();
    // Build "SELECT * from c where c.mypk in ('id1','id2',...)".
    StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
    for(int i = 0 ; i < itemIdList.size(); i++){
        queryBuilder.append("'").append(itemIdList.get(i)).append("'");
        if(i < (itemIdList.size()-1)) {
            queryBuilder.append(",");
        } else {
            queryBuilder.append(")");
        }
    }
    queryList.add(queryBuilder.toString());
    for (String query : queryList) {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(true);
        options.setIndexMetricsEnabled(true);
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            logger.info("This is query diagnostics {}", queryDiagnostics);
            // The header is optional per page; validate only when present.
            if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) {
                assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull();
                assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull();
            }
        }
    }
}
/**
 * Runs the parameterized no-match query in direct mode and validates the
 * metrics / query-plan diagnostics on each page. For GROUP BY queries only
 * the first page is validated, since later pages carry no plan diagnostics.
 */
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
public void queryMetrics(String query, Boolean qmEnabled) {
    CosmosContainer directContainer =
        this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId())
            .getContainer(this.cosmosAsyncContainer.getId());
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    if (qmEnabled != null) {
        options.setQueryMetricsEnabled(qmEnabled);
    }
    boolean groupByFirstResponse = true;
    Iterator<FeedResponse<InternalObjectNode>> pages = directContainer.queryItems(query, options,
        InternalObjectNode.class).iterableByPage().iterator();
    assertThat(pages.hasNext()).isTrue();
    while (pages.hasNext()) {
        FeedResponse<InternalObjectNode> page = pages.next();
        String queryDiagnostics = page.getCosmosDiagnostics().toString();
        assertThat(page.getResults().size()).isEqualTo(0);
        boolean isGroupByQuery = query.contains("group by");
        if (!isGroupByQuery || groupByFirstResponse) {
            validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
            validateDirectModeQueryDiagnostics(queryDiagnostics);
            if (isGroupByQuery) {
                groupByFirstResponse = false;
            }
        }
    }
}
/**
 * Runs a cross-partition ORDER BY on a dedicated 40k-RU (multi-partition)
 * container with unbounded parallelism and asserts that the partition key
 * range ids reported in diagnostics ("partitionKeyRangeId") match the
 * "pkrId" markers, i.e. every contacted range is accounted for.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryDiagnosticsOnOrderBy() {
    String containerId = "testcontainer";
    // 40k RU forces multiple physical partitions for a true cross-partition query.
    cosmosAsyncDatabase.createContainer(containerId, "/mypk",
        ThroughputProperties.createManualThroughput(40000)).block();
    CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
    testcontainer.createItem(getInternalObjectNode()).block();
    // -1 = let the SDK pick maximum parallelism.
    options.setMaxDegreeOfParallelism(-1);
    String query = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options,
        InternalObjectNode.class);
    Set<String> partitionKeyRangeIds = new HashSet<>();
    Set<String> pkRids = new HashSet<>();
    cosmosPagedFlux.byPage().flatMap(feedResponse -> {
        String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
        // Collect range ids from the structured diagnostics...
        Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
        Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            String group = matcher.group(2);
            partitionKeyRangeIds.add(group);
        }
        // ...and from the "pkrId:" markers; the two sets must agree.
        pattern = Pattern.compile("(pkrId:)(\\d)");
        matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            String group = matcher.group(2);
            pkRids.add(group);
        }
        return Flux.just(feedResponse);
    }).blockLast();
    assertThat(pkRids).isNotEmpty();
    assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
    // NOTE(review): if an assertion above fails, the container is never deleted
    // and leaks into subsequent runs — consider a try/finally. Confirm intent.
    deleteCollection(testcontainer);
}
/**
 * Asserts the diagnostics fragments expected only from DIRECT (TCP) mode:
 * store response/address statistics present, gateway statistics absent.
 */
private void validateDirectModeQueryDiagnostics(String diagnostics) {
    String[] expectedFragments = {
        "\"connectionMode\":\"DIRECT\"",
        "supplementalResponseStatisticsList",
        "responseStatisticsList",
        "\"gatewayStatistics\":null",
        "addressResolutionStatistics",
        "\"userAgent\":\"" + Utils.getUserAgent() + "\""
    };
    for (String fragment : expectedFragments) {
        assertThat(diagnostics).contains(fragment);
    }
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
/**
 * Asserts the diagnostics fragments expected only from GATEWAY mode:
 * populated gateway statistics, query operation type, regions contacted.
 */
private void validateGatewayModeQueryDiagnostics(String diagnostics) {
    assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
    String[] expectedFragments = {
        "\"connectionMode\":\"GATEWAY\"",
        "\"operationType\":\"Query\"",
        "\"userAgent\":\"" + Utils.getUserAgent() + "\"",
        "\"regionsContacted\""
    };
    for (String fragment : expectedFragments) {
        assertThat(diagnostics).contains(fragment);
    }
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
/**
 * Gateway-mode variant of {@link #queryMetrics}: seeds 100 documents, runs the
 * parameterized query (which matches nothing) and validates both the generic
 * query diagnostics and the gateway-specific fragments on every relevant page.
 *
 * Fixes: the locally created client was never closed (resource leak on every
 * data-provider invocation) and was misleadingly named "testDirectClient"
 * although it is built in gateway mode.
 */
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2)
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) {
    CosmosClient testGatewayClient = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true)
        .gatewayMode()
        .buildClient();
    try {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosContainer cosmosContainer = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId())
            .getContainer(cosmosAsyncContainer.getId());
        List<String> itemIdList = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            InternalObjectNode internalObjectNode = getInternalObjectNode();
            CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
            if (i % 20 == 0) {
                itemIdList.add(internalObjectNode.getId());
            }
        }
        // For GROUP BY queries only the first page carries plan diagnostics.
        boolean groupByFirstResponse = true;
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer
            .queryItems(query, options, InternalObjectNode.class)
            .iterableByPage()
            .iterator();
        assertThat(iterator.hasNext()).isTrue();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            assertThat(feedResponse.getResults().size()).isEqualTo(0);
            if (!query.contains("group by") || groupByFirstResponse) {
                validateQueryDiagnostics(queryDiagnostics, qmEnabled, true);
                validateGatewayModeQueryDiagnostics(queryDiagnostics);
                if (query.contains("group by")) {
                    groupByFirstResponse = false;
                }
            }
        }
    } finally {
        testGatewayClient.close();
    }
}
/**
 * Ensures request-charge accumulation works while a non-ROOT default locale
 * is active (guards against locale-sensitive number formatting/parsing in
 * the diagnostics path).
 *
 * Fixes: the default-locale reset now runs in a finally block — previously a
 * failing assertion (or any exception mid-query) leaked Locale.GERMAN into
 * every subsequent test in the JVM.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryMetricsWithADifferentLocale() {
    Locale.setDefault(Locale.GERMAN);
    try {
        String query = "select * from root where root.id= \"someid\"";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options,
            InternalObjectNode.class)
            .iterableByPage().iterator();
        double requestCharge = 0;
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            requestCharge += feedResponse.getRequestCharge();
        }
        assertThat(requestCharge).isGreaterThan(0);
    } finally {
        // Same post-state as before (ROOT), but now guaranteed on failure too.
        // NOTE(review): restoring the pre-test default instead of ROOT may be
        // more correct — confirm whether other tests rely on ROOT.
        Locale.setDefault(Locale.ROOT);
    }
}
/**
 * Shared assertions on query diagnostics: metric fragments must appear iff
 * metrics are enabled (a null flag means the SDK default, which is enabled),
 * and query-plan timing fragments must appear iff a plan fetch was expected.
 */
private static void validateQueryDiagnostics(
    String queryDiagnostics,
    Boolean qmEnabled,
    boolean expectQueryPlanDiagnostics) {
    boolean metricsExpected = qmEnabled == null || qmEnabled;
    String[] metricFragments = {
        "Retrieved Document Count",
        "Query Preparation Times",
        "Runtime Execution Times",
        "Partition Execution Timeline"
    };
    for (String fragment : metricFragments) {
        if (metricsExpected) {
            assertThat(queryDiagnostics).contains(fragment);
        } else {
            assertThat(queryDiagnostics).doesNotContain(fragment);
        }
    }
    String[] planFragments = {
        "QueryPlan Start Time (UTC)=",
        "QueryPlan End Time (UTC)=",
        "QueryPlan Duration (ms)="
    };
    for (String fragment : planFragments) {
        if (expectQueryPlanDiagnostics) {
            assertThat(queryDiagnostics).contains(fragment);
        } else {
            assertThat(queryDiagnostics).doesNotContain(fragment);
        }
    }
}
/**
 * Writes N items into one logical partition, reads them all back page by page
 * (page size 5) and checks each page's diagnostics: metric fragments respect
 * the flag, and plan diagnostics are absent (readAll needs no query plan).
 */
@Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT)
public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) {
    // One random pk value => all items land in the same logical partition.
    String pkValue = UUID.randomUUID().toString();
    for (int i = 0; i < expectedItemCount; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue);
        CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode);
    }
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    if (qmEnabled != null) {
        options = options.setQueryMetricsEnabled(qmEnabled);
    }
    // Small page size forces multiple pages for the larger counts.
    ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5);
    Iterator<FeedResponse<InternalObjectNode>> iterator =
        this.container
            .readAllItems(
                new PartitionKey(pkValue),
                options,
                InternalObjectNode.class)
            .iterableByPage().iterator();
    assertThat(iterator.hasNext()).isTrue();
    int actualItemCount = 0;
    while (iterator.hasNext()) {
        FeedResponse<InternalObjectNode> feedResponse = iterator.next();
        String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
        actualItemCount += feedResponse.getResults().size();
        // expectQueryPlanDiagnostics=false: readAllItems issues no plan request.
        validateQueryDiagnostics(queryDiagnostics, qmEnabled, false);
    }
    assertThat(actualItemCount).isEqualTo(expectedItemCount);
}
/**
 * Forces a 404 by reading an existing item with a wrong partition key over a
 * dedicated direct-mode client, then validates the exception diagnostics
 * (DIRECT mode, backend latency, activity id, exception message/headers,
 * transport timeline) and the region contacted on the successful create.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnException() throws Exception {
    CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = null;
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        // Create through the fresh client; read (below) goes through the shared
        // directClient's container handle.
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        createResponse = container.createItem(internalObjectNode);
        CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
        ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
        CosmosItemResponse<InternalObjectNode> readResponse =
            cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
                new PartitionKey("wrongPartitionKey"),
                InternalObjectNode.class);
        fail("request should fail as partition key is wrong");
    } catch (CosmosException exception) {
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        assertThat(diagnostics).contains("\"backendLatencyInMs\"");
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
        assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource Not Found.");
        assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
        assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\":null");
        isValidJSON(diagnostics);
        validateTransportRequestTimelineDirect(diagnostics);
        // NOTE(review): if createItem itself threw, createResponse is null here
        // and this line NPEs, masking the original failure — confirm whether a
        // null guard is wanted.
        validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient());
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
/**
 * Simulates a metadata (collection lookup) failure by swapping the gateway
 * proxy's HttpClient for a mock that always errors with 400, then validates
 * that the exception diagnostics identify a DocumentCollection request in
 * DIRECT mode with the injected message.
 *
 * Fixes: removed the redundant double assignment
 * "RxStoreModel x = x = ReflectionUtils.getGatewayProxy(...)".
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void directDiagnosticsOnMetadataException() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        // Every metadata HTTP call made through the gateway proxy now fails fast.
        HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
        Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class)))
            .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest")));
        RxStoreModel rxGatewayStoreModel =
            ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper());
        ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient);
        container.createItem(internalObjectNode);
        fail("request should fail as bad request");
    } catch (CosmosException exception) {
        isValidJSON(exception.toString());
        isValidJSON(exception.getMessage());
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(diagnostics).contains("\"exceptionMessage\":\"TestBadRequest\"");
        assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null"));
        // The failure happened on the collection-metadata request, not the item write.
        assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\"");
        assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty();
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        isValidJSON(diagnostics);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
/**
 * Verifies that serialized supplementalResponseStatisticsList is capped at the
 * 10 most recent entries while the in-memory list keeps all of them, that the
 * list can be cleared, and that under the cap each serialized entry carries
 * the expected fields with a recent ISO-8601 timestamp.
 *
 * Fixes: removed a redundant mid-method re-instantiation of ObjectMapper —
 * the existing mapper is stateless for this use and is now reused.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void supplementalResponseStatisticsList() throws Exception {
    ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext());
    for (int i = 0; i < 15; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null);
    }
    List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    ObjectMapper objectMapper = new ObjectMapper();
    String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    JsonNode jsonNode = objectMapper.readTree(diagnostics);
    ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // 15 recorded in memory, but serialization caps the supplemental list at 10.
    assertThat(storeResponseStatistics.size()).isEqualTo(15);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10);
    clearStoreResponseStatistics(clientSideRequestStatistics);
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    assertThat(storeResponseStatistics.size()).isEqualTo(0);
    for (int i = 0; i < 7; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null);
    }
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    jsonNode = objectMapper.readTree(diagnostics);
    supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // Below the cap, serialized and in-memory counts must match exactly.
    assertThat(storeResponseStatistics.size()).isEqualTo(7);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
    for (JsonNode node : supplementalResponseStatisticsListNode) {
        assertThat(node.get("storeResult").asText()).isNotNull();
        // Timestamps must parse as ISO-8601 instants and be recent (< 5s old).
        String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText();
        Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC));
        assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000);
        assertThat(node.get("requestResponseTimeUTC")).isNotNull();
        assertThat(node.get("requestOperationType")).isNotNull();
        assertThat(node.get("requestSessionToken")).isNotNull();
    }
}
/**
 * Exercises each serialization-diagnostics marker: database and container
 * deserialization on reads, partition-key-fetch serialization when the PK is
 * derived from the document, and item deserialization — which is recorded
 * lazily, only once getItem() is actually called.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void serializationOnVariousScenarios() {
    CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
    String diagnostics = cosmosDatabase.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
    CosmosContainerResponse containerResponse = this.container.read();
    diagnostics = containerResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
    TestItem testItem = new TestItem();
    testItem.id = "TestId";
    testItem.mypk = "TestPk";
    // No explicit PK supplied => the SDK serializes the item to extract it.
    CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    testItem.id = "TestId2";
    testItem.mypk = "TestPk";
    // Explicit PK supplied => no PK-fetch serialization; item not yet deserialized.
    itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    // Calling getItem() triggers (and records) the item deserialization.
    TestItem readTestItem = itemResponse.getItem();
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
    InternalObjectNode properties = readItemResponse.getItem();
    diagnostics = readItemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}
/**
 * Checks RNTBD request/response payload-length accounting across operations:
 * create carries the item both ways, a 409 conflict carries it only on the
 * request, read carries it only on the response, and delete carries neither.
 */
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdRequestResponseLengthStatistics() throws Exception {
    TestItem testItem = new TestItem();
    testItem.id = UUID.randomUUID().toString();
    testItem.mypk = UUID.randomUUID().toString();
    // Serialized size of the document, used as the expected request length.
    int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
    CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
    CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
    validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
    try {
        // Same id again => 409; request still carries the item, response payload is 0.
        container.createItem(testItem);
        fail("expected to fail due to 409");
    } catch (CosmosException e) {
        logger.info("Diagnostics are : {}", e.getDiagnostics());
        String diagnostics = e.getDiagnostics().toString();
        assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\"");
        assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
        assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\"");
        validate(e.getDiagnostics(), testItemLength, 0);
    }
    // Read: empty request body, item-sized response body.
    CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
    validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
    // Delete: no payload in either direction.
    CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
    validate(deleteItemResponse.getDiagnostics(), 0, 0);
}
// Runs three upserts separated by one-second sleeps against a dedicated direct-mode client,
// then validates the RNTBD service-endpoint statistics against the recorded time windows.
// The sleeps keep the per-operation windows disjoint so timestamp assertions can tell them apart.
@Test(groups = {"simple"}, dataProvider = "connectionStateListenerArgProvider", timeOut = TIMEOUT)
public void rntbdStatistics(boolean connectionStateListenerEnabled) throws Exception {
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        DirectConnectionConfig connectionConfig = DirectConnectionConfig.getDefaultConfig();
        // Toggles the connection-state-listener ("cerMetrics") feature under test.
        connectionConfig.setConnectionEndpointRediscoveryEnabled(connectionStateListenerEnabled);
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode(connectionConfig)
            .buildClient();
        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
        Thread.sleep(1000);
        // The first operation establishes the RNTBD service endpoint; its window bounds "createdTime".
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation2 = Instant.now();
        CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();
        // Only operation 3's diagnostics are validated in depth: by then the endpoint carries
        // history (createdTime, lastRequestTime) spanning the earlier operations.
        validateRntbdStatistics(operation3Response.getDiagnostics(),
            beforeClientInitialization,
            beforeInitializingRntbdServiceEndpoint,
            afterInitializingRntbdServiceEndpoint,
            beforeOperation2,
            afterOperation2,
            beforeOperation3,
            afterOperation3,
            connectionStateListenerEnabled);
        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}
// Asserts the structure and timing of the RNTBD store result inside a diagnostics payload:
// queue gauges are sane, the connection-state-listener ("cerMetrics") node exists only when
// the feature is enabled, and the endpoint's created/lastRequest timestamps fall within the
// measured operation windows (widened by 1-2 ms to absorb timestamp granularity/skew).
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
    Instant clientInitializationTime,
    Instant beforeInitializingRntbdServiceEndpoint,
    Instant afterInitializingRntbdServiceEndpoint,
    Instant beforeOperation2,
    Instant afterOperation2,
    Instant beforeOperation3,
    Instant afterOperation3,
    boolean connectionStateListenerEnabled) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    // asInt(-1) turns a missing/non-numeric field into -1, which fails the >= 0 checks below.
    assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThanOrEqualTo(0);
    assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
    JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
    assertThat(serviceEndpointStatistics).isNotNull();
    assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
    // No concurrent traffic in this test: the only request in flight is the one being recorded.
    assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
    assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
    assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);
    JsonNode connectionStateListenerMetrics = serviceEndpointStatistics.get("cerMetrics");
    if (connectionStateListenerEnabled) {
        // Metrics node must exist, but no connection-error events occur in this healthy run.
        assertThat(connectionStateListenerMetrics).isNotNull();
        assertThat(connectionStateListenerMetrics.get("lastCallTimestamp")).isNull();
        assertThat(connectionStateListenerMetrics.get("lastActionableContext")).isNull();
    } else {
        assertThat(connectionStateListenerMetrics).isNull();
    }
    // createdTime must fall inside the window around the first (endpoint-initializing) operation.
    Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isAfterOrEqualTo(beforeInitializationThreshold);
    Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isBeforeOrEqualTo(afterInitializationThreshold);
    // NOTE(review): lastRequestTime / lastSuccessfulRequestTime are asserted against the
    // operation-2 window — presumably the stats are snapshotted before operation 3 finishes;
    // confirm against the RNTBD implementation if this ever proves flaky.
    Instant afterOperation2Threshold = afterOperation2.plusMillis(2);
    Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold.toString())
        .isBeforeOrEqualTo(afterOperation2Threshold.toString());
    assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText()))
        .isAfterOrEqualTo(beforeOperation2Threshold.toString())
        .isBeforeOrEqualTo(afterOperation2Threshold.toString());
}
/**
 * Validates the request/response payload length fields recorded in the first RNTBD store
 * result of the given diagnostics.
 *
 * @param cosmosDiagnostics diagnostics of the operation under test.
 * @param expectedRequestPayloadSize exact expected request payload size in bytes.
 * @param expectedResponsePayloadSize exact expected response payload size in bytes.
 * @throws Exception if the diagnostics string cannot be parsed as JSON.
 */
private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    // BUGFIX: the null check now runs BEFORE storeResult is dereferenced (the original
    // called storeResult.get(...) first, so a missing storeResult produced an NPE instead
    // of a clean assertion failure).
    assertThat(storeResult).isNotNull();
    // A store result without an exception message carries a response payload.
    boolean hasPayload = storeResult.get("exceptionMessage") == null;
    // The RNTBD frame includes headers, so the wire length strictly exceeds the payload length.
    // BUGFIX: the original asserted rntbdRequestLengthInBytes twice on consecutive lines;
    // the duplicate is removed.
    assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize);
    assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize);
    if (hasPayload) {
        assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize);
    }
    assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize);
}
// Verifies "addressResolutionStatistics" in diagnostics for two clients: a healthy one (no
// resolution exception) and one whose internal address-resolution HTTP client is swapped via
// reflection to an unreachable proxy, which must surface a "Connection refused" exception in
// the recorded address resolutions.
@Test(groups = {"emulator"}, timeOut = TIMEOUT)
public void addressResolutionStatistics() {
    CosmosClient client1 = null;
    CosmosClient client2 = null;
    String databaseId = DatabaseForTest.generateId();
    String containerId = UUID.randomUUID().toString();
    CosmosDatabase cosmosDatabase = null;
    CosmosContainer cosmosContainer = null;
    try {
        // Healthy client: a create must record a completed, exception-free address resolution.
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        client1.createDatabase(databaseId);
        cosmosDatabase = client1.getDatabase(databaseId);
        cosmosDatabase.createContainer(containerId, "/mypk");
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        cosmosContainer = cosmosDatabase.getContainer(containerId);
        CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode);
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":null");
        // Second client: its GatewayAddressCache's HTTP client is replaced (via reflection)
        // with one routed through a non-existent local proxy so address resolution fails.
        client2 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        cosmosDatabase = client2.getDatabase(databaseId);
        cosmosContainer = cosmosDatabase.getContainer(containerId);
        AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient();
        GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient,
            "addressResolver", true);
        @SuppressWarnings("rawtypes")
        Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver,
            "addressCacheByEndpoint",
            true);
        Object endpointCache = addressCacheByEndpoint.values().toArray()[0];
        GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true);
        HttpClient httpClient = httpClient(true);
        FieldUtils.writeField(addressCache, "httpClient", httpClient, true);
        // After 5 seconds a working HTTP client is restored, so the read can eventually
        // succeed while its diagnostics still record the earlier refused connection.
        new Thread(() -> {
            try {
                Thread.sleep(5000);
                HttpClient httpClient1 = httpClient(false);
                FieldUtils.writeField(addressCache, "httpClient", httpClient1, true);
            } catch (Exception e) {
                fail(e.getMessage());
            }
        }).start();
        PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
        CosmosItemResponse<InternalObjectNode> readResourceResponse =
            cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(),
                InternalObjectNode.class);
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":\"io.netty" +
            ".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
    } catch (Exception ex) {
        logger.error("Error in test addressResolutionStatistics", ex);
        fail("This test should not throw exception " + ex);
    } finally {
        safeDeleteSyncDatabase(cosmosDatabase);
        if (client1 != null) {
            client1.close();
        }
        if (client2 != null) {
            client2.close();
        }
    }
}
/**
 * Builds a minimal test document whose id and partition-key property ("mypk") share the same
 * random UUID.
 *
 * @return a new {@link InternalObjectNode} ready to be inserted into a test container.
 */
private InternalObjectNode getInternalObjectNode() {
    String documentId = UUID.randomUUID().toString();
    InternalObjectNode document = new InternalObjectNode();
    document.setId(documentId);
    BridgeInternal.setProperty(document, "mypk", documentId);
    return document;
}
/**
 * Builds a minimal test document with a random id and the caller-supplied partition-key value
 * stored under the "mypk" property.
 *
 * @param pkValue the partition-key value to assign.
 * @return a new {@link InternalObjectNode} ready to be inserted into a test container.
 */
private InternalObjectNode getInternalObjectNode(String pkValue) {
    InternalObjectNode document = new InternalObjectNode();
    document.setId(UUID.randomUUID().toString());
    BridgeInternal.setProperty(document, "mypk", pkValue);
    return document;
}
/**
 * Reads the private {@code supplementalResponseStatisticsList} field from the given request
 * statistics via reflection.
 *
 * @param requestStatistics the statistics instance to inspect.
 * @return the current list of supplemental store-response statistics.
 * @throws Exception if the field cannot be accessed reflectively.
 */
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field supplementalStatsField =
        ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    supplementalStatsField.setAccessible(true);
    @SuppressWarnings({"unchecked"})
    List<ClientSideRequestStatistics.StoreResponseStatistics> statistics =
        (List<ClientSideRequestStatistics.StoreResponseStatistics>) supplementalStatsField.get(requestStatistics);
    return statistics;
}
/**
 * Resets the private {@code supplementalResponseStatisticsList} field of the given request
 * statistics to a fresh empty list via reflection.
 *
 * @param requestStatistics the statistics instance to reset.
 * @throws Exception if the field cannot be accessed reflectively.
 */
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field supplementalStatsField =
        ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    supplementalStatsField.setAccessible(true);
    supplementalStatsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}
/**
 * Asserts that every gateway transport-timeline event name is present in the diagnostics string.
 *
 * @param diagnostics the serialized diagnostics to inspect.
 */
private void validateTransportRequestTimelineGateway(String diagnostics) {
    String[] expectedEvents = {"connectionConfigured", "requestSent", "transitTime", "received"};
    for (String event : expectedEvents) {
        assertThat(diagnostics).contains("\"eventName\":\"" + event + "\"");
    }
}
/**
 * Asserts that every direct-mode (RNTBD) transport-timeline event name, plus the timeline's
 * start-time and duration fields, are present in the diagnostics string.
 *
 * @param diagnostics the serialized diagnostics to inspect.
 */
private void validateTransportRequestTimelineDirect(String diagnostics) {
    String[] eventsBeforeDecode = {"created", "queued", "channelAcquisitionStarted", "pipelined", "transitTime"};
    for (String event : eventsBeforeDecode) {
        assertThat(diagnostics).contains("\"eventName\":\"" + event + "\"");
    }
    // NOTE(review): the original assertion for decodeTime omits the closing escaped quote
    // (prefix match only); preserved byte-for-byte here — confirm whether that is intentional.
    assertThat(diagnostics).contains("\"eventName\":\"decodeTime");
    String[] eventsAfterDecode = {"received", "completed"};
    for (String event : eventsAfterDecode) {
        assertThat(diagnostics).contains("\"eventName\":\"" + event + "\"");
    }
    assertThat(diagnostics).contains("\"startTimeUTC\"");
    assertThat(diagnostics).contains("\"durationInMilliSecs\"");
}
/**
 * Asserts that the given string parses as well-formed JSON; fails the test otherwise.
 *
 * @param json the diagnostics string to validate.
 */
public void isValidJSON(final String json) {
    // BUGFIX: try-with-resources closes the parser on every path — the original never
    // closed it, leaking the parser (and its buffers) on both success and failure.
    try (JsonParser parser = new ObjectMapper().createParser(json)) {
        // Walking every token forces full syntactic validation of the document.
        while (parser.nextToken() != null) {
            // no-op: token iteration is the validation
        }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}
/**
 * Creates a fixed HTTP client, optionally routed through a proxy at localhost:8888 so that
 * requests fail with a connection error when nothing listens there.
 *
 * @param fakeProxy whether to direct traffic through the localhost proxy address.
 * @return the configured {@link HttpClient}.
 */
private HttpClient httpClient(boolean fakeProxy) {
    HttpClientConfig config = new HttpClientConfig(new Configs());
    if (fakeProxy) {
        config = config.withProxy(
            new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
    }
    return HttpClient.createFixed(config);
}
/**
 * Deserializes the given JSON string into an {@link IndexUtilizationInfo}.
 * Logs and returns {@code null} when the payload is not well-formed.
 *
 * @param jsonString the JSON payload to deserialize.
 * @return the parsed {@link IndexUtilizationInfo}, or {@code null} on malformed input.
 */
private IndexUtilizationInfo createFromJSONString(String jsonString) {
    ObjectMapper mapper = new ObjectMapper();
    try {
        return mapper.readValue(jsonString, IndexUtilizationInfo.class);
    } catch (JsonProcessingException e) {
        logger.error("Json not correctly formed ", e);
        return null;
    }
}
// Reads the client's preferred write region out of the internal LocationCache via reflection
// and asserts that the diagnostics report exactly that single contacted region (lower-cased).
private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
    RxDocumentClientImpl rxDocumentClient =
        (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
    GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
    LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);
    // "locationInfo" holds a private nested DatabaseAccountLocationsInfo; both the field and
    // the nested class must be opened reflectively.
    Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
    locationInfoField.setAccessible(true);
    Object locationInfo = locationInfoField.get(locationCache);
    Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" +
        ".LocationCache$DatabaseAccountLocationsInfo");
    Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField(
        "availableWriteEndpointByLocation");
    availableWriteEndpointByLocation.setAccessible(true);
    @SuppressWarnings("unchecked")
    Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo);
    // NOTE(review): assumes the first entry of the map is the region the request was routed
    // to — this relies on the map's iteration order; confirm against LocationCache internals.
    String regionName = map.keySet().iterator().next();
    assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
    assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase());
}
/**
 * Minimal test document with an id and a partition-key ("mypk") field.
 * Fields are public so the test's object mapper can serialize the item directly
 * (see OBJECT_MAPPER.writeValueAsBytes usage in the tests above).
 */
public static class TestItem {
    // Document id, assigned a random UUID by the tests.
    public String id;
    // Partition-key value; the containers in these tests are partitioned on "/mypk".
    public String mypk;

    public TestItem() {
    }
}
} | |
So, this is a pattern I've seen in Storage and I'd like to begin changing it. Instead of calling into `options` based overload with a newed up options bag object could we either have a constant or call into the package-private overload that takes a splayed out options bag? It ends up being more verbose during development but to me it makes reading the flow of calls easier as there isn't as many jumps | public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
} | return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions()); | public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
 * Uploads logs to Azure Monitor with the specified data collection rule id and stream name. The
 * input logs may be too large to be sent as a single request to the Azure Monitor service; in
 * such cases this method splits the input into multiple smaller requests before sending.
 *
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule that defines the structure of the
 * logs sent in this request.
 * @param logs the collection of logs to be uploaded.
 * @param options the options to configure the upload request.
 * @return the result of the logs upload request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
    List<Object> logs, UploadLogsOptions options) {
    // Capture the subscriber Context and delegate to the package-private Context-aware overload.
    return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
// Package-private Context-aware overload: defers the split-and-upload work until subscription
// time, so no work (validation, serialization) happens when the Mono is merely assembled.
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
    List<Object> logs, UploadLogsOptions options,
    Context context) {
    return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Validates the inputs, splits the logs into gzip-compressed batches bounded by the maximum
 * request payload size, and uploads the batches (optionally concurrently), aggregating the
 * per-batch outcomes into one {@link UploadLogsResult}.
 *
 * @param dataCollectionRuleId the data collection rule id configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule.
 * @param logs the collection of logs to be uploaded; must be non-null and non-empty.
 * @param options optional upload configuration (serializer, concurrency); may be {@code null}.
 * @param context the reactor subscriber context to propagate to the service call.
 * @return a {@link Mono} emitting the aggregated upload result, or an error signal on invalid input.
 */
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        // BUGFIX: the original validated dataCollectionRuleId three times, leaving streamName
        // and logs unchecked despite the error messages naming them.
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        // Fall back to defaults for any option the caller did not supply.
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        // Requests and batches are index-aligned; flatMapSequential preserves order so each
        // response can be paired (via this iterator) with the logs that produced it.
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        // Surface synchronous failures (validation, serialization) as error signals.
        return monoError(LOGGER, ex);
    }
}
/**
 * Pairs the service response for one uploaded request with the corresponding batch of logs and
 * converts it into an {@link UploadLogsResult}; a failed batch carries the rejected logs.
 *
 * @param logBatchesIterator iterator over the log batches, advanced once per call.
 * @param responseHolder the per-request outcome from the service.
 * @return the per-batch result.
 */
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
    List<Object> batch = logBatchesIterator.next();
    if (responseHolder.getStatus() != UploadLogsStatus.FAILURE) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsError batchError = new UploadLogsError(responseHolder.getResponseError(), batch);
    return new UploadLogsResult(responseHolder.getStatus(), Arrays.asList(batchError));
}
// Sends one gzipped batch to the service. HttpResponseExceptions are converted into a FAILURE
// holder (with a mapped ResponseError) instead of being propagated, so a single failed batch
// does not cancel the sibling uploads in the surrounding flatMapSequential pipeline.
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
    return service.uploadWithResponse(dataCollectionRuleId, streamName,
        BinaryData.fromBytes(bytes), requestOptions)
        .map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
        .onErrorResume(HttpResponseException.class,
            ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
                mapToResponseError(ex))));
}
/**
 * Method to map the exception to {@link ResponseError}.
 *
 * @param ex the {@link HttpResponseException}.
 * @return the mapped {@link ResponseError}, or {@code null} when the error body does not have
 * the expected {@code {"error": {"code": ..., "message": ...}}} shape.
 */
private ResponseError mapToResponseError(HttpResponseException ex) {
    ResponseError responseError = null;
    // The service error body arrives deserialized as nested maps; probe the expected
    // "error" -> {"code", "message"} structure defensively before extracting the fields.
    if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
        @SuppressWarnings("unchecked")
        LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
        if (errorMap.containsKey("error")) {
            Object error = errorMap.get("error");
            if (error instanceof LinkedHashMap<?, ?>) {
                @SuppressWarnings("unchecked")
                LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
                if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
                    responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
                }
            }
        }
    }
    return responseError;
}
/**
 * Aggregates per-batch results into one overall result: FAILURE when every batch failed,
 * PARTIAL_FAILURE when only some did, SUCCESS otherwise. All batch errors are collected into
 * the returned result's error list.
 *
 * @param results the per-batch results, one per uploaded request.
 * @return the aggregated upload result.
 */
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
    // Single pass instead of the original three stream scans (allMatch + anyMatch + filter).
    List<UploadLogsError> errors = new ArrayList<>();
    int failureCount = 0;
    for (UploadLogsResult result : results) {
        if (result.getStatus() == UploadLogsStatus.FAILURE) {
            failureCount++;
            errors.addAll(result.getErrors());
        }
    }
    if (failureCount == results.size()) {
        // Also matches the (unexpected) empty-results case, mirroring allMatch semantics.
        return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
    }
    if (failureCount > 0) {
        return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
    }
    return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
/**
 * Serializes the logs, partitions them into batches whose cumulative (uncompressed) serialized
 * size stays at or under {@code MAX_REQUEST_PAYLOAD_SIZE}, and gzips each batch as a JSON-array
 * request body. The corresponding sublists of {@code logs} are appended to {@code logBatches}
 * (an out parameter), index-aligned with the returned request bodies.
 *
 * @param logs the logs to upload.
 * @param serializer serializer used to turn each log entry into JSON bytes.
 * @param logBatches out parameter that receives the batch boundaries of {@code logs}.
 * @return the gzipped request payloads, one per batch.
 */
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
    List<List<Object>> logBatches) {
    try {
        List<byte[]> requests = new ArrayList<>();
        long currentBatchSize = 0;
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
        generator.writeStartArray();
        List<String> serializedLogs = new ArrayList<>();
        int currentBatchStart = 0;
        for (int i = 0; i < logs.size(); i++) {
            byte[] bytes = serializer.serializeToBytes(logs.get(i));
            int currentLogSize = bytes.length;
            currentBatchSize += currentLogSize;
            if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
                // The current entry overflows the batch: flush what has accumulated so far and
                // start a fresh batch whose first entry is the current log (index i).
                // NOTE(review): sizing is based on the pre-gzip JSON bytes, not the compressed
                // payload — confirm that is the intended service limit.
                writeLogsAndCloseJsonGenerator(generator, serializedLogs);
                requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
                byteArrayOutputStream = new ByteArrayOutputStream();
                generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
                generator.writeStartArray();
                currentBatchSize = currentLogSize;
                serializedLogs.clear();
                logBatches.add(logs.subList(currentBatchStart, i));
                currentBatchStart = i;
            }
            serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
        }
        if (currentBatchSize > 0) {
            // Flush the final, non-empty batch.
            writeLogsAndCloseJsonGenerator(generator, serializedLogs);
            requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
            logBatches.add(logs.subList(currentBatchStart, logs.size()));
        }
        return requests;
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
}
/**
 * Writes the pre-serialized log entries as a comma-separated raw JSON array body, terminates
 * the array and closes the generator (which flushes the underlying stream).
 *
 * @param generator the generator positioned inside an open JSON array.
 * @param serializedLogs the already-serialized JSON entries to join into the array.
 * @throws IOException if writing to the generator fails.
 */
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
    // String.join is the idiomatic, allocation-lighter equivalent of the original
    // stream().collect(Collectors.joining(",")) pipeline.
    generator.writeRaw(String.join(",", serializedLogs));
    generator.writeEndArray();
    generator.close();
}
/**
 * Gzips the input byte array.
 *
 * @param bytes The input byte array.
 * @return gzipped byte array.
 */
private byte[] gzipRequest(byte[] bytes) {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    // try-with-resources closes (and thereby finishes) the gzip stream before toByteArray().
    try (GZIPOutputStream gzipStream = new GZIPOutputStream(compressed)) {
        gzipStream.write(bytes);
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
    return compressed.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Validates the inputs, splits the logs into gzip-compressed batches bounded by the maximum
 * request payload size, and uploads the batches (optionally concurrently), aggregating the
 * per-batch outcomes into one {@link UploadLogsResult}.
 *
 * @param dataCollectionRuleId the data collection rule id configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule.
 * @param logs the collection of logs to be uploaded; must be non-null and non-empty.
 * @param options optional upload configuration (serializer, concurrency); may be {@code null}.
 * @param context the reactor subscriber context to propagate to the service call.
 * @return a {@link Mono} emitting the aggregated upload result, or an error signal on invalid input.
 */
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        // BUGFIX: the original validated dataCollectionRuleId three times, leaving streamName
        // and logs unchecked despite the error messages naming them.
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        // Fall back to defaults for any option the caller did not supply.
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        // Requests and batches are index-aligned; flatMapSequential preserves order so each
        // response can be paired (via this iterator) with the logs that produced it.
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        // Surface synchronous failures (validation, serialization) as error signals.
        return monoError(LOGGER, ex);
    }
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
/**
 * Serializes each log entry to JSON, groups entries into batches whose combined
 * serialized (uncompressed) size stays at or under MAX_REQUEST_PAYLOAD_SIZE, and
 * gzips each batch into one request payload. A single entry larger than the cap
 * still becomes its own batch.
 *
 * @param logs the logs to serialize and batch.
 * @param serializer converts each log entry to JSON bytes.
 * @param logBatches out-parameter: receives, for each returned payload, the sublist
 * of {@code logs} it contains, so upload failures can be attributed per batch.
 * @return the gzipped JSON-array payloads, one per batch.
 */
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
// Running total of the serialized (uncompressed) size of the current batch.
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
// Pre-serialized entries of the current batch; emitted in one writeRaw call at flush.
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
// Entry i would overflow the cap: flush entries [currentBatchStart, i) as one
// request, then start a fresh batch that begins with entry i.
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
// Flush the final (possibly only) batch.
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
    // Splice the pre-serialized JSON objects into the open array, then terminate the
    // array and release the generator (closing also flushes buffered output).
    generator.writeRaw(String.join(",", serializedLogs));
    generator.writeEndArray();
    generator.close();
}
/**
 * Compresses a request payload with gzip.
 *
 * @param bytes the uncompressed payload.
 * @return the gzip-compressed bytes.
 */
private byte[] gzipRequest(byte[] bytes) {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gzipStream = new GZIPOutputStream(compressed)) {
        gzipStream.write(bytes);
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
    // The try-with-resources close writes the gzip trailer before we snapshot the buffer.
    return compressed.toByteArray();
}
} |
Not a big fan of having these validations in the try/catch block, could these be moved outside and return a `Mono` instead? | private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
    Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
    // BUG FIX: the next two checks previously re-validated dataCollectionRuleId, so a
    // null streamName or logs argument was never rejected up front.
    Objects.requireNonNull(streamName, "'streamName' cannot be null.");
    Objects.requireNonNull(logs, "'logs' cannot be null.");
    if (logs.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
    }
    // Resolve serializer and concurrency, falling back to defaults when options are absent.
    ObjectSerializer serializer = DEFAULT_SERIALIZER;
    int concurrency = 1;
    if (options != null) {
        if (options.getObjectSerializer() != null) {
            serializer = options.getObjectSerializer();
        }
        if (options.getMaxConcurrency() != null) {
            concurrency = options.getMaxConcurrency();
        }
    }
    // Split logs into gzipped payloads; logBatches receives the matching sublist of
    // logs for each payload so failures can later be attributed to their batch.
    List<List<Object>> logBatches = new ArrayList<>();
    List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
    RequestOptions requestOptions = new RequestOptions()
        .addHeader(CONTENT_ENCODING, GZIP)
        .setContext(context);
    Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
    // flatMapSequential keeps responses in request order, keeping the iterator above
    // aligned with the responses consumed in mapResult.
    return Flux.fromIterable(requests)
        .flatMapSequential(bytes ->
            uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
        .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
        .collectList()
        .map(this::createResponse);
} catch (RuntimeException ex) {
    // Surface synchronous failures (validation, serialization) as an error Mono.
    return monoError(LOGGER, ex);
}
} | } | private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
    Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
    // BUG FIX: the next two checks previously re-validated dataCollectionRuleId, so a
    // null streamName or logs argument was never rejected up front.
    Objects.requireNonNull(streamName, "'streamName' cannot be null.");
    Objects.requireNonNull(logs, "'logs' cannot be null.");
    if (logs.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
    }
    // Resolve serializer and concurrency, falling back to defaults when options are absent.
    ObjectSerializer serializer = DEFAULT_SERIALIZER;
    int concurrency = 1;
    if (options != null) {
        if (options.getObjectSerializer() != null) {
            serializer = options.getObjectSerializer();
        }
        if (options.getMaxConcurrency() != null) {
            concurrency = options.getMaxConcurrency();
        }
    }
    // Split logs into gzipped payloads; logBatches receives the matching sublist of
    // logs for each payload so failures can later be attributed to their batch.
    List<List<Object>> logBatches = new ArrayList<>();
    List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
    RequestOptions requestOptions = new RequestOptions()
        .addHeader(CONTENT_ENCODING, GZIP)
        .setContext(context);
    Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
    // flatMapSequential keeps responses in request order, keeping the iterator above
    // aligned with the responses consumed in mapResult.
    return Flux.fromIterable(requests)
        .flatMapSequential(bytes ->
            uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
        .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
        .collectList()
        .map(this::createResponse);
} catch (RuntimeException ex) {
    // Surface synchronous failures (validation, serialization) as an error Mono.
    return monoError(LOGGER, ex);
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if {@code dataCollectionRuleId} or {@code logs} is null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
// Delegates to the options overload with default upload options
// (default serializer, concurrency of 1).
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if {@code dataCollectionRuleId} or {@code logs} is null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
// withContext captures the reactive Context for the package-private worker overload.
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
// Package-private worker that binds the caller's Context. Mono.defer postpones the
// work in splitAndUpload (argument validation, serialization, batching) until
// subscription time, keeping the assembly phase side-effect free.
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
    // Advance to the batch of logs this response corresponds to; responses arrive in
    // request order (flatMapSequential upstream), so the iterator stays aligned.
    List<Object> logsBatch = logBatchesIterator.next();
    if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
        // Exactly one error is produced per failed batch, so an immutable one-element
        // list (singletonList) is preferable to Arrays.asList.
        return new UploadLogsResult(responseHolder.getStatus(),
            java.util.Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
    }
    return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
    // Uploads one gzipped payload. HTTP errors are converted into a FAILURE holder
    // instead of terminating the overall upload pipeline.
    return service.uploadWithResponse(dataCollectionRuleId, streamName,
            BinaryData.fromBytes(bytes), requestOptions)
        .map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
        .onErrorResume(HttpResponseException.class,
            // Mono.fromSupplier defers mapToResponseError until subscription rather than
            // doing the mapping work eagerly while assembling the fallback Mono.
            ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
                mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
    // The service error payload deserializes into nested maps shaped like
    // {"error": {"code": ..., "message": ...}}; bail out early on any mismatch.
    Object value = ex.getValue();
    if (!(value instanceof LinkedHashMap<?, ?>)) {
        return null;
    }
    @SuppressWarnings("unchecked")
    LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) value;
    Object error = errorMap.get("error");
    if (!(error instanceof LinkedHashMap<?, ?>)) {
        return null;
    }
    @SuppressWarnings("unchecked")
    LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
    if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
        return new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
    }
    // No structured error could be extracted.
    return null;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
    // Aggregate the per-batch outcomes in a single pass instead of traversing the
    // results list three times with allMatch/anyMatch/filter streams. `results` is
    // never empty here: at least one request is always produced upstream.
    int failureCount = 0;
    List<UploadLogsError> errors = new ArrayList<>();
    for (UploadLogsResult result : results) {
        if (result.getStatus() == UploadLogsStatus.FAILURE) {
            failureCount++;
            errors.addAll(result.getErrors());
        }
    }
    if (failureCount == 0) {
        // Every batch succeeded; a SUCCESS result carries no error list.
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsStatus status = failureCount == results.size()
        ? UploadLogsStatus.FAILURE
        : UploadLogsStatus.PARTIAL_FAILURE;
    return new UploadLogsResult(status, errors);
}
/**
 * Serializes each log entry to JSON, groups entries into batches whose combined
 * serialized (uncompressed) size stays at or under MAX_REQUEST_PAYLOAD_SIZE, and
 * gzips each batch into one request payload.
 *
 * @param logs the logs to serialize and batch.
 * @param serializer converts each log entry to JSON bytes.
 * @param logBatches out-parameter: receives, for each returned payload, the sublist
 * of {@code logs} it contains, so upload failures can be attributed per batch.
 * @return the gzipped JSON-array payloads, one per batch.
 */
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
// Running total of the serialized (uncompressed) size of the current batch.
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
// Pre-serialized entries of the current batch; emitted in one writeRaw call at flush.
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
// Entry i would overflow the cap: flush entries [currentBatchStart, i) as one
// request, then start a fresh batch that begins with entry i.
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
// Flush the final (possibly only) batch.
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
    // Splice the pre-serialized JSON objects into the open array, then terminate the
    // array and release the generator (closing also flushes buffered output).
    generator.writeRaw(String.join(",", serializedLogs));
    generator.writeEndArray();
    generator.close();
}
/**
 * Compresses a request payload with gzip.
 *
 * @param bytes the uncompressed payload.
 * @return the gzip-compressed bytes.
 */
private byte[] gzipRequest(byte[] bytes) {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gzipStream = new GZIPOutputStream(compressed)) {
        gzipStream.write(bytes);
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
    // The try-with-resources close writes the gzip trailer before we snapshot the buffer.
    return compressed.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} |
Collections.singletonList would be better here as it's only a one-element list | private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Arrays.asList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
} | Arrays.asList(new UploadLogsError(responseHolder.getResponseError(), logsBatch))); | private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        // BUG FIX: the next two checks previously re-validated dataCollectionRuleId, so
        // a null streamName or logs argument was never rejected up front.
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        // Resolve serializer and concurrency, falling back to defaults when options are absent.
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        // Split logs into gzipped payloads; logBatches receives the matching sublist of
        // logs for each payload so failures can later be attributed to their batch.
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        // flatMapSequential keeps responses in request order, keeping the iterator above
        // aligned with the responses consumed in mapResult.
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        // Surface synchronous failures (validation, serialization) as an error Mono.
        return monoError(LOGGER, ex);
    }
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
boolean allErrors = results.stream().allMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (allErrors) {
return new UploadLogsResult(UploadLogsStatus.FAILURE,
results.stream().flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
boolean anyErrors = results.stream().anyMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (anyErrors) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE,
results.stream().filter(result -> result.getStatus() == UploadLogsStatus.FAILURE)
.flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} |
I can create a static constant for empty options type but not sure what you mean by a package-private overload (we do have that overload but not sure if you meant something else). | public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
} | return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions()); | public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Arrays.asList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
boolean allErrors = results.stream().allMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (allErrors) {
return new UploadLogsResult(UploadLogsStatus.FAILURE,
results.stream().flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
boolean anyErrors = results.stream().anyMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (anyErrors) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE,
results.stream().filter(result -> result.getStatus() == UploadLogsStatus.FAILURE)
.flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} |
I agree with moving this to `azure-core` and once it is in `azure-core` this can leverage `AccessibleByteArrayOutputStream` and return `ByteBuffer` instead of `byte[]` and we can optimize away copying the `ByteArrayOutputStream` bytes again | private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
} | private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Arrays.asList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
boolean allErrors = results.stream().allMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (allErrors) {
return new UploadLogsResult(UploadLogsStatus.FAILURE,
results.stream().flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
boolean anyErrors = results.stream().anyMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (anyErrors) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE,
results.stream().filter(result -> result.getStatus() == UploadLogsStatus.FAILURE)
.flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
/**
 * Serializes the input logs to JSON arrays and splits them into gzipped request payloads, each
 * capped at {@code MAX_REQUEST_PAYLOAD_SIZE} bytes of uncompressed serialized content.
 * The {@code logBatches} out-parameter receives, in the same order as the returned requests,
 * the sublist of input logs that each request contains.
 *
 * @param logs the full list of logs to upload; assumed non-empty (validated by the caller).
 * @param serializer the serializer used to convert each log entry to JSON bytes.
 * @param logBatches out-parameter populated with one sublist of {@code logs} per returned request.
 * @return the gzipped JSON-array payloads, one per batch.
 * @throws UncheckedIOException if serialization or gzip compression fails.
 */
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
                                        List<List<Object>> logBatches) {
    try {
        List<byte[]> requests = new ArrayList<>();
        long currentBatchSize = 0;
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
        generator.writeStartArray();
        List<String> serializedLogs = new ArrayList<>();
        int currentBatchStart = 0;
        for (int i = 0; i < logs.size(); i++) {
            byte[] bytes = serializer.serializeToBytes(logs.get(i));
            int currentLogSize = bytes.length;
            currentBatchSize += currentLogSize;
            if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
                // Flush the accumulated batch BEFORE appending the current log: log i becomes the
                // first entry of the next batch (currentBatchStart = i, size reset to this log's size).
                // NOTE(review): if the very first log of a batch is itself larger than
                // MAX_REQUEST_PAYLOAD_SIZE at i == 0, this flushes an empty "[]" request and an
                // empty sublist — confirm whether that edge case is intended.
                writeLogsAndCloseJsonGenerator(generator, serializedLogs);
                requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
                byteArrayOutputStream = new ByteArrayOutputStream();
                generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
                generator.writeStartArray();
                currentBatchSize = currentLogSize;
                serializedLogs.clear();
                // subList is a view over 'logs'; callers must not mutate 'logs' afterwards.
                logBatches.add(logs.subList(currentBatchStart, i));
                currentBatchStart = i;
            }
            serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
        }
        // Flush the trailing partial batch (always non-zero here since 'logs' is non-empty).
        if (currentBatchSize > 0) {
            writeLogsAndCloseJsonGenerator(generator, serializedLogs);
            requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
            logBatches.add(logs.subList(currentBatchStart, logs.size()));
        }
        return requests;
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
}
/**
 * Writes the already-serialized log entries into the open JSON array, closes the array, and
 * closes the generator (which also flushes it to the underlying stream).
 *
 * @param generator the generator with an open JSON array.
 * @param serializedLogs the serialized JSON objects to emit as comma-separated array elements.
 * @throws IOException if writing to or closing the generator fails.
 */
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
    String joinedLogs = String.join(",", serializedLogs);
    generator.writeRaw(joinedLogs);
    generator.writeEndArray();
    generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
} | |
Another change I'd like to see is making `List<List<Object>> logBatches` and `List<byte[]> requests` into a tuple or simple holder object. Right now we're handling to lists with expected index matching, but what if there ends up being a race condition in `map` where `logBatchesIterator` is being used and it misaligns the two list indexes where request[0] -> logBatches[1] and request[1] -> logBatches[0] | private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
} | List<byte[]> requests = createGzipRequests(logs, serializer, logBatches); | private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Arrays.asList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
boolean allErrors = results.stream().allMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (allErrors) {
return new UploadLogsResult(UploadLogsStatus.FAILURE,
results.stream().flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
boolean anyErrors = results.stream().anyMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (anyErrors) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE,
results.stream().filter(result -> result.getStatus() == UploadLogsStatus.FAILURE)
.flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} |
nit: I'd change this to `Mono.fromSupplier` to really make sure it's deferred | private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
} | ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE, | private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
 * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
 * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
 * the input logs into multiple smaller requests before sending to the service.
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule that defines the structure of the
 * logs sent in this request.
 * @param logs the collection of logs to be uploaded.
 * @return the result of the logs upload request.
 * @throws NullPointerException if {@code dataCollectionRuleId}, {@code streamName} or {@code logs} is null.
 * @throws IllegalArgumentException if {@code logs} is empty.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
    // Delegates to the options overload with default options (default serializer, concurrency 1).
    return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
 * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
 * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
 * the input logs into multiple smaller requests before sending to the service.
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule that defines the structure of the
 * logs sent in this request.
 * @param logs the collection of logs to be uploaded.
 * @param options the options to configure the upload request.
 * @return the result of the logs upload request.
 * @throws NullPointerException if {@code dataCollectionRuleId}, {@code streamName} or {@code logs} is null.
 * @throws IllegalArgumentException if {@code logs} is empty.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
                                     List<Object> logs, UploadLogsOptions options) {
    // withContext captures the subscriber context so it can be propagated through the HTTP pipeline.
    return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
// Context-aware overload used by the public upload APIs. Mono.defer postpones argument
// validation and request construction until subscription time.
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
                              List<Object> logs, UploadLogsOptions options,
                              Context context) {
    return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Validates the input, splits the logs into gzipped batches that fit the service payload limit, and
 * uploads the batches concurrently (bounded by the configured concurrency), aggregating the per-batch
 * outcomes into a single {@link UploadLogsResult}.
 *
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule.
 * @param logs the collection of logs to be uploaded; must be non-null and non-empty.
 * @param options optional configuration (serializer, max concurrency); may be null.
 * @param context additional context propagated through the HTTP pipeline.
 * @return the aggregated result of the upload request(s), or an error Mono for invalid input.
 */
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        // Each argument is validated against itself; previously dataCollectionRuleId was checked
        // three times and null streamName/logs slipped through to fail later with a confusing error.
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        // createGzipRequests fills logBatches with one sub-list per generated request, in order.
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        // flatMapSequential emits responses in request order, so this iterator stays aligned
        // with the batch that produced each response inside mapResult.
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        // Surface validation failures through the Mono instead of throwing synchronously.
        return monoError(LOGGER, ex);
    }
}
/**
 * Pairs a per-batch upload outcome with the batch of logs that produced it.
 *
 * @param logBatchesIterator iterator over the log batches, advanced once per response; responses
 * arrive in request order, so the next batch always corresponds to this response.
 * @param responseHolder the outcome of uploading one batch.
 * @return a per-batch {@link UploadLogsResult}; on failure it carries the failed logs and error.
 */
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
    List<Object> batch = logBatchesIterator.next();
    if (responseHolder.getStatus() != UploadLogsStatus.FAILURE) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsError batchError = new UploadLogsError(responseHolder.getResponseError(), batch);
    return new UploadLogsResult(responseHolder.getStatus(), Arrays.asList(batchError));
}
/**
 * Extracts the service error code and message from an {@link HttpResponseException} whose body has
 * the shape {@code {"error": {"code": ..., "message": ...}}}.
 * @param ex the {@link HttpResponseException} returned by the service.
 * @return the mapped {@link ResponseError}, or {@code null} if the body does not match that shape.
 */
private ResponseError mapToResponseError(HttpResponseException ex) {
    if (!(ex.getValue() instanceof LinkedHashMap<?, ?>)) {
        return null;
    }
    @SuppressWarnings("unchecked")
    LinkedHashMap<String, Object> body = (LinkedHashMap<String, Object>) ex.getValue();
    Object error = body.get("error");
    if (!(error instanceof LinkedHashMap<?, ?>)) {
        // Also covers a missing "error" key, since get(...) returns null in that case.
        return null;
    }
    @SuppressWarnings("unchecked")
    LinkedHashMap<String, String> details = (LinkedHashMap<String, String>) error;
    if (details.containsKey("code") && details.containsKey("message")) {
        return new ResponseError(details.get("code"), details.get("message"));
    }
    return null;
}
/**
 * Aggregates per-batch results into a single overall result: SUCCESS when no batch failed,
 * FAILURE when every batch failed, and PARTIAL_FAILURE otherwise, carrying all batch errors.
 *
 * @param results the per-batch upload results (one per request sent to the service).
 * @return the aggregated {@link UploadLogsResult}.
 */
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
    // Single pass instead of three separate stream traversals (allMatch + anyMatch + filter).
    int failureCount = 0;
    List<UploadLogsError> errors = new ArrayList<>();
    for (UploadLogsResult result : results) {
        if (result.getStatus() == UploadLogsStatus.FAILURE) {
            failureCount++;
            errors.addAll(result.getErrors());
        }
    }
    if (failureCount == 0) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsStatus status = failureCount == results.size()
        ? UploadLogsStatus.FAILURE : UploadLogsStatus.PARTIAL_FAILURE;
    return new UploadLogsResult(status, errors);
}
/**
 * Serializes the logs into JSON-array payloads, splitting them so that each uncompressed payload
 * stays within MAX_REQUEST_PAYLOAD_SIZE, and gzips each payload. For every returned request, the
 * corresponding sub-list of {@code logs} is appended to {@code logBatches} in the same order, so
 * callers can match responses back to the logs they carried.
 *
 * @param logs the logs to serialize and batch.
 * @param serializer serializer used to convert each log entry to JSON bytes.
 * @param logBatches output parameter; receives one sub-list of {@code logs} per returned request.
 * @return the gzip-compressed request payloads, one per batch.
 * @throws UncheckedIOException if writing the JSON payload fails.
 */
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
                                        List<List<Object>> logBatches) {
    try {
        List<byte[]> requests = new ArrayList<>();
        long currentBatchSize = 0;
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
        generator.writeStartArray();
        List<String> serializedLogs = new ArrayList<>();
        // Index into `logs` where the batch currently being accumulated starts.
        int currentBatchStart = 0;
        for (int i = 0; i < logs.size(); i++) {
            byte[] bytes = serializer.serializeToBytes(logs.get(i));
            int currentLogSize = bytes.length;
            currentBatchSize += currentLogSize;
            if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
                // Adding log i would exceed the limit: flush logs [currentBatchStart, i) as one
                // request, then start a fresh batch whose first entry is log i.
                // NOTE(review): if a single serialized log alone exceeds the limit, the flush below
                // can emit an empty batch (when i == currentBatchStart) and the oversized log still
                // goes out in an over-limit request — confirm whether that input is expected.
                writeLogsAndCloseJsonGenerator(generator, serializedLogs);
                requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
                byteArrayOutputStream = new ByteArrayOutputStream();
                generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
                generator.writeStartArray();
                currentBatchSize = currentLogSize;
                serializedLogs.clear();
                logBatches.add(logs.subList(currentBatchStart, i));
                currentBatchStart = i;
            }
            serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
        }
        if (currentBatchSize > 0) {
            // Flush the final partial batch.
            writeLogsAndCloseJsonGenerator(generator, serializedLogs);
            requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
            logBatches.add(logs.subList(currentBatchStart, logs.size()));
        }
        return requests;
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
}
/**
 * Writes the pre-serialized log entries into the generator as the comma-separated body of the
 * currently open JSON array, then closes the array and the generator (flushing the output).
 *
 * @param generator a generator positioned inside an open JSON array.
 * @param serializedLogs the already-serialized JSON objects to join into the array body.
 * @throws IOException if writing to the underlying stream fails.
 */
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
    String arrayBody = String.join(",", serializedLogs);
    generator.writeRaw(arrayBody);
    generator.writeEndArray();
    generator.close();
}
/**
 * Compresses the given request payload with gzip.
 *
 * @param bytes the uncompressed payload.
 * @return the gzip-compressed payload.
 * @throws UncheckedIOException if compression fails.
 */
private byte[] gzipRequest(byte[] bytes) {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gzipStream = new GZIPOutputStream(compressed)) {
        gzipStream.write(bytes);
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
    return compressed.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Validates the input, splits the logs into gzipped batches that fit the service payload limit, and
 * uploads the batches concurrently (bounded by the configured concurrency), aggregating the per-batch
 * outcomes into a single {@link UploadLogsResult}.
 *
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule.
 * @param logs the collection of logs to be uploaded; must be non-null and non-empty.
 * @param options optional configuration (serializer, max concurrency); may be null.
 * @param context additional context propagated through the HTTP pipeline.
 * @return the aggregated result of the upload request(s), or an error Mono for invalid input.
 */
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        // Each argument is validated against itself; previously dataCollectionRuleId was checked
        // three times and null streamName/logs slipped through to fail later with a confusing error.
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        // createGzipRequests fills logBatches with one sub-list per generated request, in order.
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        // flatMapSequential emits responses in request order, so this iterator stays aligned
        // with the batch that produced each response inside mapResult.
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        // Surface validation failures through the Mono instead of throwing synchronously.
        return monoError(LOGGER, ex);
    }
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
/**
 * Aggregates per-batch results into a single overall result: SUCCESS when no batch failed,
 * FAILURE when every batch failed, and PARTIAL_FAILURE otherwise, carrying all batch errors.
 *
 * @param results the per-batch upload results (one per request sent to the service).
 * @return the aggregated {@link UploadLogsResult}.
 */
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
    int failureCount = 0;
    // Allocated lazily — stays null on the all-success path so no list is created.
    List<UploadLogsError> errors = null;
    for (UploadLogsResult result : results) {
        if (result.getStatus() != UploadLogsStatus.SUCCESS) {
            failureCount++;
            if (errors == null) {
                errors = new ArrayList<>();
            }
            errors.addAll(result.getErrors());
        }
    }
    if (failureCount == 0) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    if (failureCount < results.size()) {
        // Some batches succeeded and some failed.
        return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
    }
    return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} |
Could this all be simplified into a single `for` loop? It looks like FAILURE vs PARTIAL_FAILURE is based on whether all requests were failures or only some, this could be made into a simple if ```java int failureCount = 0; List<ResponseError> errors = new ArrayList(); // This could be instantiated when needed but this is just a GitHub comment for (...) { if (result.getStatus() == UploadLogsResult.SUCCESS) { continue; // Was successful, nothing to inspect. } failureCount++; // Increase the failure count. errors.add(result.getErrors()); } if (failureCount == 0) { return new UploadLogsResult(UploadLogsStatus.SUCCESS, null); } else { return new UploadLogsResult(failureCount == results.size() ? UploadLogsStatus.FAILURE : UploadLogsStatus.PARTIAL_FAILURE, errors); } ``` | private UploadLogsResult createResponse(List<UploadLogsResult> results) {
boolean allErrors = results.stream().allMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (allErrors) {
return new UploadLogsResult(UploadLogsStatus.FAILURE,
results.stream().flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
boolean anyErrors = results.stream().anyMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (anyErrors) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE,
results.stream().filter(result -> result.getStatus() == UploadLogsStatus.FAILURE)
.flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
} | } | private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Validates the input, splits the logs into gzipped batches that fit the service payload limit, and
 * uploads the batches concurrently (bounded by the configured concurrency), aggregating the per-batch
 * outcomes into a single {@link UploadLogsResult}.
 *
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule.
 * @param logs the collection of logs to be uploaded; must be non-null and non-empty.
 * @param options optional configuration (serializer, max concurrency); may be null.
 * @param context additional context propagated through the HTTP pipeline.
 * @return the aggregated result of the upload request(s), or an error Mono for invalid input.
 */
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        // Each argument is validated against itself; previously dataCollectionRuleId was checked
        // three times and null streamName/logs slipped through to fail later with a confusing error.
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        // createGzipRequests fills logBatches with one sub-list per generated request, in order.
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        // flatMapSequential emits responses in request order, so this iterator stays aligned
        // with the batch that produced each response inside mapResult.
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        // Surface validation failures through the Mono instead of throwing synchronously.
        return monoError(LOGGER, ex);
    }
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Arrays.asList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
/**
 * Uploads a single gzipped payload to the service and converts the outcome into a
 * {@link UploadLogsResponseHolder}. Service errors are absorbed into a FAILURE holder rather than
 * propagated, so one failed batch does not cancel the other in-flight batches.
 *
 * @param dataCollectionRuleId the data collection rule id.
 * @param streamName the stream name configured in the data collection rule.
 * @param requestOptions request options carrying the gzip Content-Encoding header and context.
 * @param bytes the gzip-compressed request payload.
 * @return a holder with SUCCESS, or FAILURE plus the mapped service error.
 */
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
    return service.uploadWithResponse(dataCollectionRuleId, streamName,
        BinaryData.fromBytes(bytes), requestOptions)
        .map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
        .onErrorResume(HttpResponseException.class,
            // fromSupplier defers constructing the holder (and mapping the error body) until
            // subscription, instead of doing that work eagerly inside the error handler.
            ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
                mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Validates the input, splits the logs into gzipped batches that fit the service payload limit, and
 * uploads the batches concurrently (bounded by the configured concurrency), aggregating the per-batch
 * outcomes into a single {@link UploadLogsResult}.
 *
 * @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
 * @param streamName the stream name configured in the data collection rule.
 * @param logs the collection of logs to be uploaded; must be non-null and non-empty.
 * @param options optional configuration (serializer, max concurrency); may be null.
 * @param context additional context propagated through the HTTP pipeline.
 * @return the aggregated result of the upload request(s), or an error Mono for invalid input.
 */
private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
    try {
        // Each argument is validated against itself; previously dataCollectionRuleId was checked
        // three times and null streamName/logs slipped through to fail later with a confusing error.
        Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
        Objects.requireNonNull(streamName, "'streamName' cannot be null.");
        Objects.requireNonNull(logs, "'logs' cannot be null.");
        if (logs.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
        }
        ObjectSerializer serializer = DEFAULT_SERIALIZER;
        int concurrency = 1;
        if (options != null) {
            if (options.getObjectSerializer() != null) {
                serializer = options.getObjectSerializer();
            }
            if (options.getMaxConcurrency() != null) {
                concurrency = options.getMaxConcurrency();
            }
        }
        // createGzipRequests fills logBatches with one sub-list per generated request, in order.
        List<List<Object>> logBatches = new ArrayList<>();
        List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
        RequestOptions requestOptions = new RequestOptions()
            .addHeader(CONTENT_ENCODING, GZIP)
            .setContext(context);
        // flatMapSequential emits responses in request order, so this iterator stays aligned
        // with the batch that produced each response inside mapResult.
        Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
        return Flux.fromIterable(requests)
            .flatMapSequential(bytes ->
                uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
            .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
            .collectList()
            .map(this::createResponse);
    } catch (RuntimeException ex) {
        // Surface validation failures through the Mono instead of throwing synchronously.
        return monoError(LOGGER, ex);
    }
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
/**
 * Uploads a single gzipped payload to the service and converts the outcome into a
 * {@link UploadLogsResponseHolder}. Service errors are absorbed into a FAILURE holder rather than
 * propagated, so one failed batch does not cancel the other in-flight batches.
 *
 * @param dataCollectionRuleId the data collection rule id.
 * @param streamName the stream name configured in the data collection rule.
 * @param requestOptions request options carrying the gzip Content-Encoding header and context.
 * @param bytes the gzip-compressed request payload.
 * @return a holder with SUCCESS, or FAILURE plus the mapped service error.
 */
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
    return service.uploadWithResponse(dataCollectionRuleId, streamName,
        BinaryData.fromBytes(bytes), requestOptions)
        .map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
        .onErrorResume(HttpResponseException.class,
            // fromSupplier defers constructing the holder (and mapping the error body) until subscription.
            ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
                mapToResponseError(ex))));
}
/**
 * Maps an {@link HttpResponseException} to a {@link ResponseError} by extracting the nested
 * "error" object (with its "code" and "message" fields) from the deserialized response body.
 *
 * <p>Checks against {@code Map} rather than the concrete {@code LinkedHashMap} so the
 * extraction does not depend on which map implementation the deserializer happens to produce.
 * Fully qualified {@code java.util.Map} is used to avoid requiring a new import.</p>
 *
 * @param ex the {@link HttpResponseException}.
 * @return the mapped {@link ResponseError}, or {@code null} if the response body does not have
 * the expected shape or the code/message values are absent.
 */
private ResponseError mapToResponseError(HttpResponseException ex) {
    if (!(ex.getValue() instanceof java.util.Map<?, ?>)) {
        return null;
    }
    java.util.Map<?, ?> errorMap = (java.util.Map<?, ?>) ex.getValue();
    Object error = errorMap.get("error");
    if (!(error instanceof java.util.Map<?, ?>)) {
        return null;
    }
    java.util.Map<?, ?> errorDetails = (java.util.Map<?, ?>) error;
    Object code = errorDetails.get("code");
    Object message = errorDetails.get("message");
    // Require both fields to be present and non-null so we never build a ResponseError with
    // null code/message (the original containsKey checks allowed null values through).
    if (code == null || message == null) {
        return null;
    }
    return new ResponseError(String.valueOf(code), String.valueOf(message));
}
/**
 * Serializes the given logs into JSON array payloads, splitting them into batches so that each
 * uncompressed payload stays within {@code MAX_REQUEST_PAYLOAD_SIZE}, and gzips each payload.
 *
 * <p>For every entry added to the returned request list, a parallel entry is appended to
 * {@code logBatches} containing the original log objects in that request, so callers can map
 * per-request failures back to the offending logs. The two lists stay index-aligned.</p>
 *
 * @param logs the logs to serialize and batch; assumed non-empty (validated by the caller).
 * @param serializer the serializer used to convert each log entry to JSON bytes.
 * @param logBatches out parameter; receives one sublist (view) of {@code logs} per returned request.
 * @return the gzipped JSON payloads, one per batch.
 * @throws UncheckedIOException (logged) if JSON generation or gzipping fails.
 */
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
        List<List<Object>> logBatches) {
    try {
        List<byte[]> requests = new ArrayList<>();
        long currentBatchSize = 0;
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
        generator.writeStartArray();
        List<String> serializedLogs = new ArrayList<>();
        int currentBatchStart = 0;
        for (int i = 0; i < logs.size(); i++) {
            byte[] bytes = serializer.serializeToBytes(logs.get(i));
            int currentLogSize = bytes.length;
            currentBatchSize += currentLogSize;
            // Flush the batch accumulated so far before adding the current log. The
            // !serializedLogs.isEmpty() guard fixes a bug in the original: when a single log by
            // itself exceeded the payload limit at the start of a batch, an empty "[]" request
            // (and an empty logBatches entry) was emitted. Such an oversized log is now sent on
            // its own in a later flush instead.
            if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE && !serializedLogs.isEmpty()) {
                writeLogsAndCloseJsonGenerator(generator, serializedLogs);
                requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
                byteArrayOutputStream = new ByteArrayOutputStream();
                generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
                generator.writeStartArray();
                currentBatchSize = currentLogSize;
                serializedLogs.clear();
                logBatches.add(logs.subList(currentBatchStart, i));
                currentBatchStart = i;
            }
            serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
        }
        // Flush whatever remains after the loop (always non-empty when logs is non-empty).
        if (currentBatchSize > 0) {
            writeLogsAndCloseJsonGenerator(generator, serializedLogs);
            requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
            logBatches.add(logs.subList(currentBatchStart, logs.size()));
        }
        return requests;
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
}
/**
 * Writes the comma-joined, already-serialized log entries into the open JSON array, then closes
 * the array and the generator (which flushes the underlying stream).
 *
 * @param generator a generator positioned inside an open JSON array.
 * @param serializedLogs the JSON representations of the individual log entries.
 * @throws IOException if writing to the underlying stream fails.
 */
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
    String joinedLogs = String.join(",", serializedLogs);
    generator.writeRaw(joinedLogs);
    generator.writeEndArray();
    generator.close();
}
/**
 * Compresses the given byte array with gzip.
 *
 * @param bytes the uncompressed request payload.
 * @return the gzip-compressed payload.
 */
private byte[] gzipRequest(byte[] bytes) {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gzipStream = new GZIPOutputStream(compressed)) {
        gzipStream.write(bytes);
    } catch (IOException exception) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
    }
    return compressed.toByteArray();
}
} |
`map` should ensure there are no race conditions (as per reactive streams spec) and `flatMapSequential` should ensure ordering. So, requests and logBatches should never be intertwined. However, this does feel like a bit of reactor magic and we can just put the logBatches in the result holder type I created later. | private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
    // Bug fix: the original validated dataCollectionRuleId three times while streamName and
    // logs were never null-checked, despite the error messages claiming otherwise.
    Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
    Objects.requireNonNull(streamName, "'streamName' cannot be null.");
    Objects.requireNonNull(logs, "'logs' cannot be null.");
    if (logs.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
    }
    // Resolve per-call overrides, falling back to the defaults.
    ObjectSerializer serializer = DEFAULT_SERIALIZER;
    int concurrency = 1;
    if (options != null) {
        if (options.getObjectSerializer() != null) {
            serializer = options.getObjectSerializer();
        }
        if (options.getMaxConcurrency() != null) {
            concurrency = options.getMaxConcurrency();
        }
    }
    // createGzipRequests keeps requests and logBatches index-aligned, one entry per request.
    List<List<Object>> logBatches = new ArrayList<>();
    List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
    RequestOptions requestOptions = new RequestOptions()
        .addHeader(CONTENT_ENCODING, GZIP)
        .setContext(context);
    // flatMapSequential preserves request order even with concurrency > 1, so advancing this
    // iterator once per response keeps each result paired with its originating batch.
    Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
    return Flux.fromIterable(requests)
        .flatMapSequential(bytes ->
            uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
        .map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
        .collectList()
        .map(this::createResponse);
} catch (RuntimeException ex) {
    // Surface synchronous failures through the returned Mono, per reactive conventions.
    return monoError(LOGGER, ex);
}
} | List<byte[]> requests = createGzipRequests(logs, serializer, logBatches); | private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in the data collection rule that defines the
* structure of the logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in the data collection rule that defines the
* structure of the logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
// Package-private entry point used by the context-capturing public overloads. Mono.defer
// postpones validation, serialization and batching to subscription time so that synchronous
// failures surface through the returned Mono rather than being thrown from this call.
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
/**
 * Converts one per-request upload outcome into an {@link UploadLogsResult}, attaching the batch
 * of logs the request carried when it failed. Consumes exactly one batch from the iterator per
 * call, relying on results arriving in submission order.
 *
 * @param logBatchesIterator iterator over the original log batches, advanced once per call.
 * @param responseHolder holder with the upload status and, on failure, the service error.
 * @return a success result, or a failure result with the error and the affected logs.
 */
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
    List<Object> batch = logBatchesIterator.next();
    if (responseHolder.getStatus() != UploadLogsStatus.FAILURE) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsError uploadError = new UploadLogsError(responseHolder.getResponseError(), batch);
    return new UploadLogsResult(responseHolder.getStatus(), Arrays.asList(uploadError));
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
/**
 * Aggregates per-request upload results into one overall result: SUCCESS when every request
 * succeeded, FAILURE when every request failed, PARTIAL_FAILURE otherwise. Error details from
 * all failed requests are concatenated in request order.
 *
 * <p>Rewritten as a single pass over the results; the original made three separate stream
 * passes (allMatch, anyMatch, then filter+flatMap) over the same list. This also matches the
 * single-pass implementation used elsewhere in this codebase.</p>
 *
 * @param results the per-request results, in request order; non-empty in practice because
 * upload requires a non-empty log list.
 * @return the aggregated result.
 */
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
    int failureCount = 0;
    List<UploadLogsError> errors = null;
    for (UploadLogsResult result : results) {
        if (result.getStatus() == UploadLogsStatus.FAILURE) {
            failureCount++;
            if (errors == null) {
                errors = new ArrayList<>();
            }
            errors.addAll(result.getErrors());
        }
    }
    if (failureCount == 0) {
        return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
    }
    UploadLogsStatus status = failureCount == results.size()
        ? UploadLogsStatus.FAILURE
        : UploadLogsStatus.PARTIAL_FAILURE;
    return new UploadLogsResult(status, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} |
discussed offline and we'll consider changing the pattern for preview 2. | private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
} | } | private Mono<UploadLogsResult> splitAndUpload(String dataCollectionRuleId, String streamName, List<Object> logs, UploadLogsOptions options, Context context) {
try {
Objects.requireNonNull(dataCollectionRuleId, "'dataCollectionRuleId' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'streamName' cannot be null.");
Objects.requireNonNull(dataCollectionRuleId, "'logs' cannot be null.");
if (logs.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'logs' cannot be empty."));
}
ObjectSerializer serializer = DEFAULT_SERIALIZER;
int concurrency = 1;
if (options != null) {
if (options.getObjectSerializer() != null) {
serializer = options.getObjectSerializer();
}
if (options.getMaxConcurrency() != null) {
concurrency = options.getMaxConcurrency();
}
}
List<List<Object>> logBatches = new ArrayList<>();
List<byte[]> requests = createGzipRequests(logs, serializer, logBatches);
RequestOptions requestOptions = new RequestOptions()
.addHeader(CONTENT_ENCODING, GZIP)
.setContext(context);
Iterator<List<Object>> logBatchesIterator = logBatches.iterator();
return Flux.fromIterable(requests)
.flatMapSequential(bytes ->
uploadToService(dataCollectionRuleId, streamName, requestOptions, bytes), concurrency)
.map(responseHolder -> mapResult(logBatchesIterator, responseHolder))
.collectList()
.map(this::createResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Arrays.asList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.just(new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
boolean allErrors = results.stream().allMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (allErrors) {
return new UploadLogsResult(UploadLogsStatus.FAILURE,
results.stream().flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
boolean anyErrors = results.stream().anyMatch(result -> result.getStatus() == UploadLogsStatus.FAILURE);
if (anyErrors) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE,
results.stream().filter(result -> result.getStatus() == UploadLogsStatus.FAILURE)
.flatMap(result -> result.getErrors().stream()).collect(Collectors.toList()));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} | class LogsIngestionAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class);
private static final String CONTENT_ENCODING = "Content-Encoding";
private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024;
private static final String GZIP = "gzip";
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final IngestionUsingDataCollectionRulesAsyncClient service;
LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) {
this.service = service;
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName, List<Object> logs) {
return upload(dataCollectionRuleId, streamName, logs, new UploadLogsOptions());
}
/**
* Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be
* too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split
* the input logs into multiple smaller requests before sending to the service.
* @param dataCollectionRuleId the data collection rule id that is configured to collect and transform the logs.
* @param streamName the stream name configured in data collection rule that matches defines the structure of the
* logs sent in this request.
* @param logs the collection of logs to be uploaded.
* @param options the options to configure the upload request.
* @return the result of the logs upload request.
* @throws NullPointerException if any of {@code dataCollectionRuleId}, {@code streamName} or {@code logs} are null.
* @throws IllegalArgumentException if {@code logs} is empty.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options) {
return withContext(context -> upload(dataCollectionRuleId, streamName, logs, options, context));
}
Mono<UploadLogsResult> upload(String dataCollectionRuleId, String streamName,
List<Object> logs, UploadLogsOptions options,
Context context) {
return Mono.defer(() -> splitAndUpload(dataCollectionRuleId, streamName, logs, options, context));
}
private UploadLogsResult mapResult(Iterator<List<Object>> logBatchesIterator, UploadLogsResponseHolder responseHolder) {
List<Object> logsBatch = logBatchesIterator.next();
if (responseHolder.getStatus() == UploadLogsStatus.FAILURE) {
return new UploadLogsResult(responseHolder.getStatus(),
Collections.singletonList(new UploadLogsError(responseHolder.getResponseError(), logsBatch)));
}
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
private Mono<UploadLogsResponseHolder> uploadToService(String dataCollectionRuleId, String streamName, RequestOptions requestOptions, byte[] bytes) {
return service.uploadWithResponse(dataCollectionRuleId, streamName,
BinaryData.fromBytes(bytes), requestOptions)
.map(response -> new UploadLogsResponseHolder(UploadLogsStatus.SUCCESS, null))
.onErrorResume(HttpResponseException.class,
ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(UploadLogsStatus.FAILURE,
mapToResponseError(ex))));
}
/**
* Method to map the exception to {@link ResponseError}.
* @param ex the {@link HttpResponseException}.
* @return the mapped {@link ResponseError}.
*/
private ResponseError mapToResponseError(HttpResponseException ex) {
ResponseError responseError = null;
if (ex.getValue() instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, Object> errorMap = (LinkedHashMap<String, Object>) ex.getValue();
if (errorMap.containsKey("error")) {
Object error = errorMap.get("error");
if (error instanceof LinkedHashMap<?, ?>) {
@SuppressWarnings("unchecked")
LinkedHashMap<String, String> errorDetails = (LinkedHashMap<String, String>) error;
if (errorDetails.containsKey("code") && errorDetails.containsKey("message")) {
responseError = new ResponseError(errorDetails.get("code"), errorDetails.get("message"));
}
}
}
}
return responseError;
}
private UploadLogsResult createResponse(List<UploadLogsResult> results) {
int failureCount = 0;
List<UploadLogsError> errors = null;
for (UploadLogsResult result : results) {
if (result.getStatus() != UploadLogsStatus.SUCCESS) {
failureCount++;
if (errors == null) {
errors = new ArrayList<>();
}
errors.addAll(result.getErrors());
}
}
if (failureCount == 0) {
return new UploadLogsResult(UploadLogsStatus.SUCCESS, null);
}
if (failureCount < results.size()) {
return new UploadLogsResult(UploadLogsStatus.PARTIAL_FAILURE, errors);
}
return new UploadLogsResult(UploadLogsStatus.FAILURE, errors);
}
private List<byte[]> createGzipRequests(List<Object> logs, ObjectSerializer serializer,
List<List<Object>> logBatches) {
try {
List<byte[]> requests = new ArrayList<>();
long currentBatchSize = 0;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
List<String> serializedLogs = new ArrayList<>();
int currentBatchStart = 0;
for (int i = 0; i < logs.size(); i++) {
byte[] bytes = serializer.serializeToBytes(logs.get(i));
int currentLogSize = bytes.length;
currentBatchSize += currentLogSize;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
byteArrayOutputStream = new ByteArrayOutputStream();
generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
generator.writeStartArray();
currentBatchSize = currentLogSize;
serializedLogs.clear();
logBatches.add(logs.subList(currentBatchStart, i));
currentBatchStart = i;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
}
if (currentBatchSize > 0) {
writeLogsAndCloseJsonGenerator(generator, serializedLogs);
requests.add(gzipRequest(byteArrayOutputStream.toByteArray()));
logBatches.add(logs.subList(currentBatchStart, logs.size()));
}
return requests;
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
}
private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException {
generator.writeRaw(serializedLogs.stream()
.collect(Collectors.joining(",")));
generator.writeEndArray();
generator.close();
}
/**
* Gzips the input byte array.
* @param bytes The input byte array.
* @return gzipped byte array.
*/
private byte[] gzipRequest(byte[] bytes) {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) {
zip.write(bytes);
} catch (IOException exception) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(exception));
}
return byteArrayOutputStream.toByteArray();
}
} |
JSON -> Json | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJSONList = jedis.hmget(member, "checkpoint");
String checkpointJSON;
if (checkpointJSONList.size() == 0) {
return Flux.error(new IllegalAccessException());
}
else {
checkpointJSON = checkpointJSONList.get(0);
}
try {
Checkpoint checkpoint = jacksonAdapter.deserialize(checkpointJSON, Checkpoint.class, SerializerEncoding.JSON);
list.add(checkpoint);
}
catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions
.propagate(e)); }
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | checkpointJSON = checkpointJSONList.get(0); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private final JacksonAdapter jacksonAdapter = new JacksonAdapter();
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
for (String member : members) {
String partitionOwnershipJSON = jedis.hmget(member, "partitionOwnership").get(0);
try {
PartitionOwnership partitionOwnership = jacksonAdapter.deserialize(partitionOwnershipJSON, PartitionOwnership.class, SerializerEncoding.JSON);
list.add(partitionOwnership);
}
catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions
.propagate(e)); }
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
try {
jedis.hset(key, "checkpoint", jacksonAdapter.serialize(checkpoint, SerializerEncoding.JSON));
} catch (IOException e) {
throw LOGGER.logExceptionAsError(Exceptions
.propagate(e));
}
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
There is an Assertions.assertThrows to use rather than this try/catch block. | public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(anyString())).thenThrow(new IllegalArgumentException());
try {
store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup");
} catch (IllegalArgumentException e) {
assert (true);
return;
}
assert (false);
} | try { | public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
JedisPool jedisPool;
JedisRedisCheckpointStore store;
Jedis jedis;
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
JacksonAdapter jacksonAdapter = new JacksonAdapter();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup");
try {
list.add(jacksonAdapter.serialize(checkpoint, SerializerEncoding.JSON));
}
catch (IOException e) {
System.out.println("Hello");
}
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(anyString())).thenReturn(value);
when(jedis.hmget(anyString(), anyString())).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
@Test
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
anyString is too vague. How do you know that it's what you expect and not random characters? Same with others below. Is anyString() what you want? | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup("consumerGroup")
.setEventHubName("eventHubName")
.setFullyQualifiedNamespace("fullyQualifiedNamespace")
.setPartitionId("one")
.setSequenceNumber((long) 1);
Set<String> value = new HashSet<>();
List<String> list = new ArrayList<>();
JacksonAdapter jacksonAdapter = new JacksonAdapter();
value.add("fullyQualifiedNamespace/eventHubNamespace/consumerGroup");
try {
list.add(jacksonAdapter.serialize(checkpoint, SerializerEncoding.JSON));
}
catch (IOException e) {
System.out.println("Hello");
}
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(anyString())).thenReturn(value);
when(jedis.hmget(anyString(), anyString())).thenReturn(list);
StepVerifier.create(store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup"))
.assertNext(checkpointTest -> {
assertEquals("fullyQualifiedNamespace", checkpointTest.getFullyQualifiedNamespace());
assertEquals("eventHubName", checkpointTest.getEventHubName());
assertEquals("consumerGroup", checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | when(jedis.smembers(anyString())).thenReturn(value); | public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
JedisPool jedisPool;
JedisRedisCheckpointStore store;
Jedis jedis;
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(anyString())).thenThrow(new IllegalArgumentException());
try {
store.listCheckpoints("fullyQualifiedNamespace", "eventHubName", "consumerGroup");
} catch (IllegalArgumentException e) {
assert (true);
return;
}
assert (false);
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
@Test
@Test
public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testCheckpointKeyNotStored() {
Set<String> value = new HashSet<>();
List<String> nullList = Collections.singletonList(null);
value.add(KEY);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
@Test
public void testListOwnershipEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
}
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
@Test
public void testUpdateCheckpoint() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber((long) 1);
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.exists(PREFIX)).thenReturn(true);
StepVerifier.create(store.updateCheckpoint(checkpoint))
.verifyComplete();
}
} |
this line isn't needed. | public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.expectNextCount(0L)
.verifyComplete();
} | .expectNextCount(0L) | public void testListCheckpointsEmptyList() {
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(new HashSet<>());
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.verifyComplete();
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
// Happy path for listCheckpoints: one key in the prefix set whose "checkpoint"
// hash field holds a JSON-serialized Checkpoint; the store should emit exactly
// that checkpoint with namespace, hub, and consumer group intact.
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
// Serialize with the same JSON serializer the store uses so the stubbed
// Redis value round-trips through the store's deserialization.
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
/**
 * Verifies that listCheckpoints signals an IllegalStateException when a key is
 * present in the partition set but the "checkpoint" hash field is missing
 * (hmget stubbed to return null).
 */
@Test
public void testCheckpointKeyNotStored() {
    Set<String> value = new HashSet<>();
    value.add(KEY);
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.smembers(PREFIX)).thenReturn(value);
    when(jedis.hmget(eq(KEY),
        eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(null);
    StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
        .expectError(IllegalStateException.class)
        .verify();
}
// Happy path for listOwnership: one key in the prefix set whose
// "partitionOwnership" hash field holds a JSON-serialized PartitionOwnership;
// the store should emit it with its identifying fields and owner intact.
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
// Serialize with the same serializer the store uses for a faithful round-trip.
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
/**
 * listOwnership should complete with no elements when the Redis set for the
 * consumer-group prefix is empty.
 */
@Test
public void testListOwnershipEmptyList() {
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.smembers(PREFIX)).thenReturn(Collections.emptySet());
    StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
        .expectNextCount(0L)
        .verifyComplete();
}
// claimOwnership with an empty request list should emit nothing and complete.
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.expectNextCount(0L)
.verifyComplete();
}
/**
 * updateCheckpoint should complete successfully when the checkpoint carries a
 * sequence number and the prefix already exists in Redis.
 */
@Test
public void testUpdateCheckpoint() {
    Checkpoint checkpoint = new Checkpoint()
        .setConsumerGroup(CONSUMER_GROUP)
        .setEventHubName(EVENT_HUB_NAME)
        .setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
        .setPartitionId(PARTITION_ID)
        // Long literal instead of the redundant (long) 1 cast, consistent with
        // testListCheckpoints in this class.
        .setSequenceNumber(1L);
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.exists(PREFIX)).thenReturn(true);
    StepVerifier.create(store.updateCheckpoint(checkpoint))
        .expectNextCount(0L)
        .verifyComplete();
}
} | class JedisRedisCheckpointStoreTests {
private JedisPool jedisPool;
private JedisRedisCheckpointStore store;
private Jedis jedis;
private JsonSerializer jsonSerializer;
private static final String FULLY_QUALIFIED_NAMESPACE = "fullyQualifiedNamespace";
private static final String EVENT_HUB_NAME = "eventHubName";
private static final String CONSUMER_GROUP = "consumerGroup";
private static final String PARTITION_ID = "1";
private static final String PREFIX = JedisRedisCheckpointStore.prefixBuilder(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP);
private static final String KEY = JedisRedisCheckpointStore.keyBuilder(PREFIX, PARTITION_ID);
// Creates fresh Mockito mocks for the Jedis pool and client before each test,
// wires the store under test to the mocked pool, and builds the same JSON
// serializer the store uses so expected payloads can be produced in tests.
@BeforeEach
public void setup() {
jedisPool = mock(JedisPool.class);
jedis = mock(Jedis.class);
store = new JedisRedisCheckpointStore(jedisPool);
// Matches the serializer used internally by JedisRedisCheckpointStore.
jsonSerializer = JsonSerializerProviders.createInstance(true);
}
// Happy path for listCheckpoints: one key in the prefix set whose "checkpoint"
// hash field holds a JSON-serialized Checkpoint; the store should emit exactly
// that checkpoint with namespace, hub, and consumer group intact.
@Test
public void testListCheckpoints() {
Checkpoint checkpoint = new Checkpoint()
.setConsumerGroup(CONSUMER_GROUP)
.setEventHubName(EVENT_HUB_NAME)
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setPartitionId(PARTITION_ID)
.setSequenceNumber(1L);
Set<String> value = new HashSet<>();
value.add(KEY);
// Serialize with the same JSON serializer the store uses so the stubbed
// Redis value round-trips through the store's deserialization.
byte[] bytes = jsonSerializer.serializeToBytes(checkpoint);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(list);
StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(checkpointTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, checkpointTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, checkpointTest.getEventHubName());
assertEquals(CONSUMER_GROUP, checkpointTest.getConsumerGroup());
})
.verifyComplete();
}
/**
 * Verifies that listCheckpoints completes empty (no error) when the checkpoint
 * hash field holds no value yet: hmget returns a single-element list whose only
 * entry is null.
 */
@Test
public void testCheckpointKeyNotStored() {
    Set<String> value = new HashSet<>();
    // hmget on a key whose field is unset yields a list with a null entry.
    List<String> nullList = Collections.singletonList(null);
    value.add(KEY);
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.smembers(PREFIX)).thenReturn(value);
    when(jedis.hmget(eq(KEY),
        eq(JedisRedisCheckpointStore.CHECKPOINT))).thenReturn(nullList);
    StepVerifier.create(store.listCheckpoints(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
        .verifyComplete();
}
// Happy path for listOwnership: one key in the prefix set whose
// "partitionOwnership" hash field holds a JSON-serialized PartitionOwnership;
// the store should emit it with its identifying fields and owner intact.
@Test
public void testListOwnership() {
PartitionOwnership partitionOwnership = new PartitionOwnership()
.setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
.setEventHubName(EVENT_HUB_NAME)
.setConsumerGroup(CONSUMER_GROUP)
.setPartitionId(PARTITION_ID)
.setOwnerId("ownerOne")
.setETag("eTag");
Set<String> value = new HashSet<>();
value.add(KEY);
// Serialize with the same serializer the store uses for a faithful round-trip.
byte[] bytes = jsonSerializer.serializeToBytes(partitionOwnership);
List<String> list = Collections.singletonList(new String(bytes, StandardCharsets.UTF_8));
when(jedisPool.getResource()).thenReturn(jedis);
when(jedis.smembers(PREFIX)).thenReturn(value);
when(jedis.hmget(eq(KEY),
eq(JedisRedisCheckpointStore.PARTITION_OWNERSHIP))).thenReturn(list);
StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
.assertNext(partitionOwnershipTest -> {
assertEquals(FULLY_QUALIFIED_NAMESPACE, partitionOwnershipTest.getFullyQualifiedNamespace());
assertEquals(EVENT_HUB_NAME, partitionOwnershipTest.getEventHubName());
assertEquals(CONSUMER_GROUP, partitionOwnershipTest.getConsumerGroup());
assertEquals("ownerOne", partitionOwnershipTest.getOwnerId());
})
.verifyComplete();
}
/**
 * listOwnership should complete without emitting anything when no keys are
 * registered under the consumer-group prefix.
 */
@Test
public void testListOwnershipEmptyList() {
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.smembers(PREFIX)).thenReturn(Collections.emptySet());
    StepVerifier.create(store.listOwnership(FULLY_QUALIFIED_NAMESPACE, EVENT_HUB_NAME, CONSUMER_GROUP))
        .verifyComplete();
}
// claimOwnership with an empty request list should emit nothing and complete.
@Test
public void testClaimOwnership() {
List<PartitionOwnership> partitionOwnershipList = new ArrayList<>();
StepVerifier.create(store.claimOwnership(partitionOwnershipList))
.verifyComplete();
}
/**
 * updateCheckpoint should complete successfully when the checkpoint carries a
 * sequence number and the prefix already exists in Redis.
 */
@Test
public void testUpdateCheckpoint() {
    Checkpoint checkpoint = new Checkpoint()
        .setConsumerGroup(CONSUMER_GROUP)
        .setEventHubName(EVENT_HUB_NAME)
        .setFullyQualifiedNamespace(FULLY_QUALIFIED_NAMESPACE)
        .setPartitionId(PARTITION_ID)
        // Long literal instead of the redundant (long) 1 cast, consistent with
        // testListCheckpoints in this class.
        .setSequenceNumber(1L);
    when(jedisPool.getResource()).thenReturn(jedis);
    when(jedis.exists(PREFIX)).thenReturn(true);
    StepVerifier.create(store.updateCheckpoint(checkpoint))
        .verifyComplete();
}
} |
Is there a reason to assign this to a local variable rather than referencing it directly? | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | ObjectSerializer serializer = DEFAULT_SERIALIZER; | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Creates a checkpoint store backed by the given Jedis connection pool.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of partition ownerships that were successfully claimed; currently
 * always empty because claiming is not implemented yet.
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Returning null from a Flux-returning method breaks the reactive contract
    // and causes a NullPointerException in subscribers; emit an empty Flux
    // until claiming is implemented.
    return Flux.empty();
}
/**
 * This method returns the list of ownership records from the underlying data store, and if
 * none are available it returns an empty {@link Flux}.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which ownership information is acquired
 * @param consumerGroup The consumer group name associated with the ownership records
 * @return A {@link Flux} of the stored {@link PartitionOwnership} records; empty when none exist.
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        Set<String> members = jedis.smembers(prefix);
        List<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        for (String member : members) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
            // hmget yields an empty list or a null entry when the field is not
            // set; treat a missing record as "no ownership yet", not an error.
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership record exists for this member yet.");
                continue;
            }
            listStoredOwnerships.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(PartitionOwnership.class)));
        }
        // try-with-resources closes the Jedis instance, which returns it to the
        // pool; an explicit jedisPool.returnResource here would return the same
        // connection a second time.
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes when the checkpoint has been stored.
 * @throws IllegalStateException if the checkpoint is null or carries neither an offset nor a
 * sequence number.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, "checkpoint", new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        // try-with-resources close() returns the connection to the pool; an
        // explicit jedisPool.returnResource would return it a second time.
    }
    // Returning null from a Mono-returning method violates the reactive
    // contract; Mono.empty() signals successful completion.
    return Mono.empty();
}
// Builds the Redis key prefix "<namespace>/<eventHub>/<consumerGroup>".
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
// Builds the per-partition Redis key "<prefix>/<partitionId>".
private String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
// A checkpoint is valid when it is non-null and carries at least one of an
// offset or a sequence number.
private Boolean isCheckpointValid(Checkpoint checkpoint) {
    return checkpoint != null
        && (checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
// Creates a checkpoint store backed by the given Jedis connection pool.
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of partition ownerships that were successfully claimed; currently
 * always empty because claiming is not implemented yet.
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Flux.empty() avoids allocating a throwaway ArrayList just to emit nothing.
    return Flux.empty();
}
/**
 * This method returns the list of ownership records from the underlying data store, and if
 * none are available it returns an empty {@link Flux}.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which ownership information is acquired
 * @param consumerGroup The consumer group name associated with the ownership records
 * @return A {@link Flux} of the stored {@link PartitionOwnership} records; empty when none exist.
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        List<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        for (String member : jedis.smembers(prefix)) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            // hmget yields an empty list or a null entry when the field is not
            // set; skip members that have no ownership record yet.
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                continue;
            }
            listStoredOwnerships.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(PartitionOwnership.class)));
        }
        // Closing the Jedis instance (try-with-resources) returns it to the
        // pool; an explicit jedisPool.returnResource here would return the same
        // connection a second time.
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes when the checkpoint has been stored.
 * @throws IllegalStateException if the checkpoint is null or carries neither an offset nor a
 * sequence number.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        // try-with-resources close() already returns the connection to the
        // pool; calling jedisPool.returnResource as well would return the same
        // connection twice and corrupt the pool.
    }
    return Mono.empty();
}
// Builds the Redis key prefix "<namespace>/<eventHub>/<consumerGroup>".
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
// Builds the per-partition Redis key "<prefix>/<partitionId>".
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
// A checkpoint is valid when it is non-null and carries at least one of an
// offset or a sequence number.
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
    return checkpoint != null
        && (checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null);
}
} |
The behaviour here contradicts the javadoc expectation for this method a few lines above. See: https://github.com/Azure/azure-sdk-for-java/pull/29590/files#diff-1044389012002b4bb7b97998ea2d781b0dfd5355433e44c5397f9a1b7e9c256cR51 > and if no checkpoints are available, then it returns empty results. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | return Flux.error(new IllegalArgumentException()); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of partition ownerships that were successfully claimed; currently
 * always empty because claiming is not implemented yet.
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Returning null from a Flux-returning method breaks the reactive contract
    // and causes a NullPointerException in subscribers; emit an empty Flux
    // until claiming is implemented.
    return Flux.empty();
}
/**
 * This method returns the list of ownership records from the underlying data store, and if
 * none are available it returns an empty {@link Flux}.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which ownership information is acquired
 * @param consumerGroup The consumer group name associated with the ownership records
 * @return A {@link Flux} of the stored {@link PartitionOwnership} records; empty when none exist.
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        Set<String> members = jedis.smembers(prefix);
        List<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        for (String member : members) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
            // hmget yields an empty list or a null entry when the field is not
            // set; treat a missing record as "no ownership yet", not an error.
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership record exists for this member yet.");
                continue;
            }
            listStoredOwnerships.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(PartitionOwnership.class)));
        }
        // try-with-resources closes the Jedis instance, which returns it to the
        // pool; an explicit jedisPool.returnResource here would return the same
        // connection a second time.
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes when the checkpoint has been stored.
 * @throws IllegalStateException if the checkpoint is null or carries neither an offset nor a
 * sequence number.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, "checkpoint", new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        // try-with-resources close() returns the connection to the pool; an
        // explicit jedisPool.returnResource would return it a second time.
    }
    // Returning null from a Mono-returning method violates the reactive
    // contract; Mono.empty() signals successful completion.
    return Mono.empty();
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of partition ownerships that were successfully claimed; currently
 * always empty because claiming is not implemented yet.
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Flux.empty() avoids allocating a throwaway ArrayList just to emit nothing.
    return Flux.empty();
}
/**
 * This method returns the list of ownership records from the underlying data store, and if
 * none are available it returns an empty {@link Flux}.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which ownership information is acquired
 * @param consumerGroup The consumer group name associated with the ownership records
 * @return A {@link Flux} of the stored {@link PartitionOwnership} records; empty when none exist.
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    try (Jedis jedis = jedisPool.getResource()) {
        List<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        for (String member : jedis.smembers(prefix)) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            // hmget yields an empty list or a null entry when the field is not
            // set; skip members that have no ownership record yet.
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                continue;
            }
            listStoredOwnerships.add(DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(PartitionOwnership.class)));
        }
        // Closing the Jedis instance (try-with-resources) returns it to the
        // pool; an explicit jedisPool.returnResource here would return the same
        // connection a second time.
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A {@link Mono} that completes when the checkpoint has been stored.
 * @throws IllegalStateException if the checkpoint is null or carries neither an offset nor a
 * sequence number.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        // try-with-resources close() already returns the connection to the
        // pool; calling jedisPool.returnResource as well would return the same
        // connection twice and corrupt the pool.
    }
    return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
If this field is empty, could it mean that there was no checkpoint persisted for this partition? In this case, you would immediately return an error... What would a customer expect? | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | return Flux.error(new NoSuchElementException()); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A {@link Flux} of partition ownerships that were successfully claimed; currently
 * always empty because claiming is not implemented yet.
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Returning null from a Flux-returning method breaks the reactive contract
    // and causes a NullPointerException in subscribers; emit an empty Flux
    // until claiming is implemented.
    return Flux.empty();
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
If there are no checkpoints to list, should we be returning an empty array rather than an exception? | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | return Flux.error(new IllegalArgumentException()); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
IIRC, a list has an isEmpty() method. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | if (checkpointJsonList.size() == 0) { | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> list = new ArrayList<>();
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
String partitionOwnershipJson;
if (partitionOwnershipJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
} else {
partitionOwnershipJson = partitionOwnershipJsonList.get(0);
}
PartitionOwnership partitionOwnership = serializer.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
list.add(partitionOwnership); }
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
ObjectSerializer serializer = DEFAULT_SERIALIZER;
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, "checkpoint", new String(serializer.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return null;
}
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
private String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
static final String CHECKPOINT = "checkpoint";
static final String PARTITION_OWNERSHIP = "partitionOwnership";
private final JedisPool jedisPool;
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return Flux.fromIterable(new ArrayList<>());
}
/**
* This method returns the list of checkpoints from the underlying data store, and if no checkpoints are available, then it returns empty results.
*
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
/**
* @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
* @param eventHubName The Event Hub name from which checkpoint information is acquired
* @param consumerGroup The consumer group name associated with the checkpoint
* @return Null
*/
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
ArrayList<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
for (String member : members) {
List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
if (!partitionOwnershipJsonList.isEmpty()) {
String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
if (partitionOwnershipJson == null) {
LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
continue;
}
PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(partitionOwnershipJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(PartitionOwnership.class));
listStoredOwnerships.add(partitionOwnership);
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredOwnerships);
}
}
/**
* This method updates the checkpoint in the Jedis resource for a given partition.
*
* @param checkpoint Checkpoint information for this partition
* @return Null
*/
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
if (!isCheckpointValid(checkpoint)) {
throw LOGGER.logExceptionAsWarning(Exceptions
.propagate(new IllegalStateException(
"Checkpoint is either null, or both the offset and the sequence number are null.")));
}
String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
String key = keyBuilder(prefix, checkpoint.getPartitionId());
try (Jedis jedis = jedisPool.getResource()) {
if (!jedis.exists(prefix) || !jedis.exists(key)) {
jedis.sadd(prefix, key);
jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
}
jedisPool.returnResource(jedis);
}
return Mono.empty();
}
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return fullyQualifiedNamespace + "/" + eventHubName + "/" + consumerGroup;
}
static String keyBuilder(String prefix, String partitionId) {
return prefix + "/" + partitionId;
}
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
return !(checkpoint == null || (checkpoint.getOffset() == null && checkpoint.getSequenceNumber() == null));
}
} |
These special field names should be stored as a constant. So, if we update this field name, we can refactor -> rename with the help of intellisense rather than manually looking for instances. | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
ObjectSerializer serializer = DEFAULT_SERIALIZER;
try (Jedis jedis = jedisPool.getResource()) {
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
return Flux.error(new IllegalArgumentException());
}
ArrayList<Checkpoint> list = new ArrayList<>();
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, "checkpoint");
String checkpointJson;
if (checkpointJsonList.size() == 0) {
return Flux.error(new NoSuchElementException());
}
else {
checkpointJson = checkpointJsonList.get(0);
}
Checkpoint checkpoint = serializer.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
list.add(checkpoint);
}
jedisPool.returnResource(jedis);
return Flux.fromStream(list.stream());
}
} | List<String> checkpointJsonList = jedis.hmget(member, "checkpoint"); | public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
try (Jedis jedis = jedisPool.getResource()) {
ArrayList<Checkpoint> listStoredCheckpoints = new ArrayList<>();
Set<String> members = jedis.smembers(prefix);
if (members.isEmpty()) {
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
for (String member : members) {
List<String> checkpointJsonList = jedis.hmget(member, CHECKPOINT);
if (!checkpointJsonList.isEmpty()) {
String checkpointJson = checkpointJsonList.get(0);
if (checkpointJson == null) {
LOGGER.verbose("No checkpoint persists yet.");
continue;
}
Checkpoint checkpoint = DEFAULT_SERIALIZER.deserializeFromBytes(checkpointJson.getBytes(StandardCharsets.UTF_8), TypeReference.createInstance(Checkpoint.class));
listStoredCheckpoints.add(checkpoint);
} else {
LOGGER.verbose("No checkpoint persists yet.");
}
}
jedisPool.returnResource(jedis);
return Flux.fromIterable(listStoredCheckpoints);
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
private final JedisPool jedisPool;
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
JedisRedisCheckpointStore(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
/**
* This method returns the list of partitions that were owned successfully.
*
* @param requestedPartitionOwnerships List of partition ownerships from the current instance
* @return Null
*/
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
return null;
}
/**
 * Lists all partition ownership records stored for the given Event Hub and consumer group.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return A Flux of the stored partition ownership records; empty when none exist yet.
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    // try-with-resources returns the Jedis connection to the pool via close(); no explicit
    // returnResource call is needed (returnResource is deprecated and would release it twice).
    try (Jedis jedis = jedisPool.getResource()) {
        Set<String> members = jedis.smembers(prefix);
        List<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        for (String member : members) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, "partitionOwnership");
            // hmget returns a null element when the hash field is absent; skip such members
            // instead of failing the whole stream (an empty store is a normal state, not an error).
            if (partitionOwnershipJsonList.isEmpty() || partitionOwnershipJsonList.get(0) == null) {
                LOGGER.verbose("No partition ownership record exists for this member yet.");
                continue;
            }
            PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJsonList.get(0).getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(PartitionOwnership.class));
            listStoredOwnerships.add(partitionOwnership);
        }
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A Mono that completes when the checkpoint has been stored.
 * @throws IllegalStateException if the checkpoint is null, or both its offset and sequence number are null.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, "checkpoint", new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        // The connection is returned to the pool by close() at the end of this block;
        // calling jedisPool.returnResource here as well would release it twice.
    }
    // Returning null from a Mono-returning method breaks the reactive contract; complete empty instead.
    return Mono.empty();
}
/**
 * Builds the Redis key prefix that scopes entries to one namespace, Event Hub and consumer group.
 */
private String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Appends the partition id to a prefix to form the per-partition Redis key.
 */
private String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is usable only when it is non-null and carries at least one of
 * an offset or a sequence number.
 */
private Boolean isCheckpointValid(Checkpoint checkpoint) {
    if (checkpoint == null) {
        return false;
    }
    return checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null;
}
} | class JedisRedisCheckpointStore implements CheckpointStore {
// Logger shared by all instances of this checkpoint store.
private static final ClientLogger LOGGER = new ClientLogger(JedisRedisCheckpointStore.class);
// JSON serializer used to (de)serialize Checkpoint and PartitionOwnership values.
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
// Redis hash field under which the serialized Checkpoint is stored.
static final String CHECKPOINT = "checkpoint";
// Redis hash field under which the serialized PartitionOwnership is stored.
static final String PARTITION_OWNERSHIP = "partitionOwnership";
// Pool that hands out Jedis connections; each store operation borrows one per call.
private final JedisPool jedisPool;
/**
 * Creates a checkpoint store backed by the given Jedis connection pool.
 *
 * @param jedisPool pool supplying Redis connections for all store operations
 */
JedisRedisCheckpointStore(JedisPool jedisPool) {
    this.jedisPool = jedisPool;
}
/**
 * This method returns the list of partitions that were owned successfully.
 *
 * @param requestedPartitionOwnerships List of partition ownerships from the current instance
 * @return A Flux of successfully claimed partition ownerships; empty until claiming is implemented.
 */
@Override
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) {
    // Flux.empty() expresses "nothing claimed" directly, without allocating a throwaway
    // ArrayList; the Javadoc previously claimed a null return, which was never true here.
    return Flux.empty();
}
/**
 * Lists all partition ownership records stored for the given Event Hub and consumer group,
 * returning an empty Flux when none exist yet.
 *
 * @param fullyQualifiedNamespace The fully qualified namespace of the current instance of Event Hub
 * @param eventHubName The Event Hub name from which checkpoint information is acquired
 * @param consumerGroup The consumer group name associated with the checkpoint
 * @return A Flux of the stored partition ownership records; empty when none exist.
 */
@Override
public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    String prefix = prefixBuilder(fullyQualifiedNamespace, eventHubName, consumerGroup);
    // try-with-resources returns the Jedis connection to the pool via close(); an explicit
    // returnResource call (a deprecated API) would release the same connection twice.
    try (Jedis jedis = jedisPool.getResource()) {
        Set<String> members = jedis.smembers(prefix);
        List<PartitionOwnership> listStoredOwnerships = new ArrayList<>();
        for (String member : members) {
            List<String> partitionOwnershipJsonList = jedis.hmget(member, PARTITION_OWNERSHIP);
            if (partitionOwnershipJsonList.isEmpty()) {
                continue;
            }
            String partitionOwnershipJson = partitionOwnershipJsonList.get(0);
            // hmget yields a null element when the hash field is absent; skip such members.
            if (partitionOwnershipJson == null) {
                LOGGER.verbose("No partition ownership records exist for this checkpoint yet.");
                continue;
            }
            PartitionOwnership partitionOwnership = DEFAULT_SERIALIZER.deserializeFromBytes(
                partitionOwnershipJson.getBytes(StandardCharsets.UTF_8),
                TypeReference.createInstance(PartitionOwnership.class));
            listStoredOwnerships.add(partitionOwnership);
        }
        return Flux.fromIterable(listStoredOwnerships);
    }
}
/**
 * This method updates the checkpoint in the Jedis resource for a given partition.
 *
 * @param checkpoint Checkpoint information for this partition
 * @return A Mono that completes when the checkpoint has been stored.
 * @throws IllegalStateException if the checkpoint is null, or both its offset and sequence number are null.
 */
@Override
public Mono<Void> updateCheckpoint(Checkpoint checkpoint) {
    if (!isCheckpointValid(checkpoint)) {
        throw LOGGER.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Checkpoint is either null, or both the offset and the sequence number are null.")));
    }
    String prefix = prefixBuilder(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    String key = keyBuilder(prefix, checkpoint.getPartitionId());
    try (Jedis jedis = jedisPool.getResource()) {
        if (!jedis.exists(prefix) || !jedis.exists(key)) {
            jedis.sadd(prefix, key);
            jedis.hset(key, CHECKPOINT, new String(DEFAULT_SERIALIZER.serializeToBytes(checkpoint), StandardCharsets.UTF_8));
        }
        // The connection is returned to the pool by close() at the end of this block;
        // the previous explicit jedisPool.returnResource call released it a second time.
    }
    return Mono.empty();
}
/**
 * Builds the Redis key prefix that scopes entries to one namespace, Event Hub and consumer group.
 */
static String prefixBuilder(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
/**
 * Appends the partition id to a prefix to form the per-partition Redis key.
 */
static String keyBuilder(String prefix, String partitionId) {
    return String.join("/", prefix, partitionId);
}
/**
 * A checkpoint is usable only when it is non-null and carries at least one of
 * an offset or a sequence number.
 */
private static Boolean isCheckpointValid(Checkpoint checkpoint) {
    if (checkpoint == null) {
        return false;
    }
    return checkpoint.getOffset() != null || checkpoint.getSequenceNumber() != null;
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.