comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
We need to do this to preserve the case where a user has a URL that looks encoded. - Like if they pass in a url with hello%20hello - if we encode that it'll become hello(whatever % encoding is)20hello, which we don't want to do | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | path = Utility.urlEncode(Utility.urlDecode(path)); | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} |
This particular if case is because when you call getPath, it returns the path including the starting /. Then when you do setPath for some reason, it makes the URL have two slashes - so I'm just trying to prevent that error from happening. Does that make sense? | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | if (path.startsWith("/")) { | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} |
That's not what I meant. Please take a look on this sample url https://github.com/Azure/azure-sdk-for-java/blob/6e23e4acf9b639a87da2bd0f95923c7370290dc8/sdk/storage/azure-storage-blob/src/test/resources/session-records/BlobAPITestcopysourceac%5B4%5D.json#L95 `https://azstoragesdkaccount.blob.core.windows.net/jtccopysourceac0blobapitestcopysourceac26775340db315343/javablobcopysourceac1blobapitestcopysourceac26713808da3bf` . the last `/` in this url is going to be encoded into `%2F` I think. Shouldn't we split path by `/` before encoding and just transform segments ? | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | if (path.startsWith("/")) { | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} |
That makes sense. | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | path = Utility.urlEncode(Utility.urlDecode(path)); | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} |
Oh. Both are equivalent as per the service so it shouldnt matter | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | if (path.startsWith("/")) { | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} |
Do we have a test covering that? | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | if (path.startsWith("/")) { | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} |
The fact that the tests work is validation that they are the same since otherwise the service would come back with a 404 saying the source doesnt exist. | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | if (path.startsWith("/")) { | public static String encodeUrlPath(String url) {
/* Deconstruct the URL and reconstruct it making sure the path is encoded. */
UrlBuilder builder = UrlBuilder.parse(url);
String path = builder.getPath();
if (path.startsWith("/")) {
path = path.substring(1);
}
path = Utility.urlEncode(Utility.urlDecode(path));
builder.setPath(path);
return builder.toString();
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
 * Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
 * inserting the {@code +} character.
 *
 * @param stringToEncode String value to encode
 * @return the encoded string value; null when the input is null, the empty string when the input is empty
 * @throws RuntimeException If the UTF-8 charset isn't supported
 */
public static String urlEncode(final String stringToEncode) {
    if (stringToEncode == null) {
        return null;
    }
    if (stringToEncode.length() == 0) {
        return "";
    }
    // URLEncoder.encode emits '+' for ' ' and "%2B" for a literal '+', so every '+' in its output
    // represents a space. Rewriting '+' to "%20" therefore produces the desired space encoding and
    // is equivalent to the previous approach of encoding the segments between spaces and joining
    // them with "%20", without the manual segment-splitting loop.
    return encode(stringToEncode).replace("+", "%20");
}
/*
 * Helper method to reduce duplicate calls of URLEncoder.encode.
 *
 * Percent-encodes the given string as UTF-8. Note that URLEncoder.encode emits '+' for spaces;
 * callers that need "%20" instead must post-process the output (see urlEncode above).
 */
private static String encode(final String stringToEncode) {
    try {
        return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
    } catch (UnsupportedEncodingException ex) {
        // UTF-8 is a charset every JVM is required to support, so this path is effectively unreachable.
        throw new RuntimeException(ex);
    }
}
/**
* Performs a safe encoding of a url string, only encoding the path.
*
* @param url The url to encode.
* @return The encoded url.
*/
/**
 * Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
 * millisecond precision.
 *
 * @param dateString the {@code String} to be interpreted as a <code>Date</code>
 * @return the corresponding <code>Date</code> object
 * @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
 */
public static OffsetDateTime parseDate(String dateString) {
    String pattern = MAX_PRECISION_PATTERN;
    // The exact length of the raw string identifies which ISO8601 variant was supplied. All inputs
    // are interpreted as UTC; the trailing 'Z' is either stripped or replaced with padding digits.
    switch (dateString.length()) {
        case 28: // e.g. "yyyy-MM-ddTHH:mm:ss.SSSSSSSZ" — intentional fall-through: all of 24..28
        case 27: // carry more fractional-second digits than millisecond precision and are truncated
        case 26:
        case 25:
        case 24:
            // Keep only up to millisecond precision, dropping extra digits and the trailing 'Z'.
            dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
            break;
        case 23:
            // Two fractional digits ("....SSZ"): replace 'Z' with '0' to pad millis to three digits.
            dateString = dateString.replace("Z", "0");
            break;
        case 22:
            // One fractional digit ("....SZ"): replace 'Z' with "00" to pad millis to three digits.
            dateString = dateString.replace("Z", "00");
            break;
        case 20:
            // Whole-second form "yyyy-MM-ddTHH:mm:ssZ".
            pattern = Utility.ISO8601_PATTERN;
            break;
        case 17:
            // Minute-precision form "yyyy-MM-ddTHH:mmZ".
            pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
            break;
        default:
            throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
    }
    DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
    return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
 * A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
 * and the input length.
 *
 * @param data The input data which needs to convert to ByteBuffer.
 * @param length The expected input data length.
 * @param blockSize The size of each ByteBuffer.
 * @return {@link ByteBuffer} which contains the input data.
 * @throws UnexpectedLengthException when input data length mismatch input length.
 * @throws RuntimeException When I/O error occurs.
 */
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
    // Running total of bytes actually read, shared across the lambdas below. A single-element
    // array is used so the (effectively final) reference can be mutated from inside the lambdas.
    final long[] currentTotalLength = new long[1];
    // Emit ceil(length / blockSize) chunk start offsets and read each chunk sequentially
    // (concatMap preserves order and reads one chunk at a time from the shared stream).
    return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
    .map(i -> i * blockSize)
    .concatMap(pos -> Mono.fromCallable(() -> {
    // Size of this chunk: a full block, or the remainder for the final chunk.
    long count = pos + blockSize > length ? length - pos : blockSize;
    byte[] cache = new byte[(int) count];
    int numOfBytes = 0;
    int offset = 0;
    int len = (int) count;
    // Keep reading until the chunk buffer is full or the stream is exhausted.
    while (numOfBytes != -1 && offset < count) {
    numOfBytes = data.read(cache, offset, len);
    // NOTE(review): when read returns -1, these two updates skew offset/len by one before the
    // loop exits; harmless as written since the loop condition then fails, but fragile.
    offset += numOfBytes;
    len -= numOfBytes;
    if (numOfBytes != -1) {
    currentTotalLength[0] += numOfBytes;
    }
    }
    // Stream ended before the promised number of bytes arrived: fail the pipeline.
    if (numOfBytes == -1 && currentTotalLength[0] < length) {
    throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
    String.format("Request body emitted %d bytes, less than the expected %d bytes.",
    currentTotalLength[0], length), currentTotalLength[0], length));
    }
    return ByteBuffer.wrap(cache);
    }))
    .doOnComplete(() -> {
    try {
    // Leftover bytes mean the caller under-declared the length.
    // NOTE(review): available() is only a hint for some stream types — reliable for in-memory
    // streams, not guaranteed for e.g. network streams; confirm the expected callers.
    if (data.available() > 0) {
    long totalLength = currentTotalLength[0] + data.available();
    throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
    String.format("Request body emitted %d bytes, more than the expected %d bytes.",
    totalLength, length), totalLength, length));
    }
    } catch (IOException e) {
    throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
    + e.getMessage()));
    }
    });
}
} |
I think this needs to go into the troubleshooting string, rather than just Exception.toString() @simplynaveen20 | public String toString() {
return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='" | public String toString() {
return getClass().getSimpleName() + "{" + "userAgent=" + USER_AGENT + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String SDK_VERSION = HttpConstants.Versions.SDK_VERSION;
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
/**
 * Creates a new instance of the CosmosException class. This is the primary constructor the other
 * overloads delegate to.
 *
 * @param statusCode the http status code of the response.
 * @param message the error message.
 * @param responseHeaders the response headers; null is treated as empty.
 * @param cause the underlying cause, may be null.
 */
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
    super(message, cause);
    this.statusCode = statusCode;
    // No timeline information is available at construction time.
    this.requestTimeline = RequestTimeline.empty();
    // Defensive copy so later mutation of the caller's map does not affect this exception.
    this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
 * Creates a new instance of the CosmosException class.
 *
 * @param statusCode the http status code of the response.
 */
CosmosException(int statusCode) {
    // Delegate to the primary constructor with no message, headers, or cause.
    this(statusCode, null, null, null);
}
/**
 * Creates a new instance of the CosmosException class.
 *
 * @param statusCode the http status code of the response.
 * @param errorMessage the error message.
 */
protected CosmosException(int statusCode, String errorMessage) {
    this(statusCode, errorMessage, null, null);
    // Also surface the message through a synthesized CosmosError so getError() is non-null.
    this.cosmosError = new CosmosError();
    ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
 * Creates a new instance of the CosmosException class.
 *
 * @param statusCode the http status code of the response.
 * @param innerException the original exception, preserved as this exception's cause.
 */
protected CosmosException(int statusCode, Exception innerException) {
    this(statusCode, null, null, innerException);
}
/**
 * Creates a new instance of the CosmosException class. Convenience overload with no resource
 * address.
 *
 * @param statusCode the http status code of the response.
 * @param cosmosErrorResource the error resource object.
 * @param responseHeaders the response headers.
 */
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
    this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
 * Creates a new instance of the CosmosException class.
 *
 * @param resourceAddress the address of the resource the request is associated with.
 * @param statusCode the http status code of the response.
 * @param cosmosErrorResource the error resource object; when non-null, its message becomes this
 * exception's message.
 * @param responseHeaders the response headers.
 */
protected CosmosException(String resourceAddress,
    int statusCode,
    CosmosError cosmosErrorResource,
    Map<String, String> responseHeaders) {
    this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
    this.resourceAddress = resourceAddress;
    this.cosmosError = cosmosErrorResource;
}
/**
 * Creates a new instance of the CosmosException class.
 *
 * @param message the string message.
 * @param statusCode the http status code of the response.
 * @param exception the exception object, preserved as this exception's cause.
 * @param responseHeaders the response headers.
 * @param resourceAddress the address of the resource the request is associated with.
 */
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
    String resourceAddress) {
    this(statusCode, message, responseHeaders, exception);
    this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
    // Start from the core error message and append the diagnostics whenever they are attached.
    String baseMessage = innerErrorMessage();
    if (cosmosDiagnostics == null) {
        return baseMessage;
    }
    return baseMessage + ", " + cosmosDiagnostics.toString();
}
/**
 * Gets the activity ID associated with the request.
 *
 * @return the activity ID, or null when no response headers (or no activity-id header) are present.
 */
public String getActivityId() {
    if (this.responseHeaders != null) {
        return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
    }
    return null;
}
/**
 * Gets the http status code.
 *
 * @return the status code.
 */
public int getStatusCode() {
    return this.statusCode;
}
/**
 * Gets the sub status code.
 *
 * @return the sub status code, or UNKNOWN when the header is absent or unparseable.
 */
public int getSubStatusCode() {
    int code = HttpConstants.SubStatusCodes.UNKNOWN;
    if (this.responseHeaders != null) {
        String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
        if (StringUtils.isNotEmpty(subStatusString)) {
            try {
                code = Integer.parseInt(subStatusString);
            } catch (NumberFormatException e) {
                // Deliberate best-effort: a malformed sub-status header falls back to UNKNOWN.
            }
        }
    }
    return code;
}
/**
 * Gets the error code associated with the exception.
 *
 * @return the error; may be null when no error body was supplied at construction.
 */
CosmosError getError() {
    return this.cosmosError;
}
// Package-private mutator used to attach/replace the error body after construction.
void setError(CosmosError cosmosError) {
    this.cosmosError = cosmosError;
}
/**
 * Gets the recommended time duration after which the client can retry failed
 * requests
 *
 * @return the recommended time duration after which the client can retry failed
 * requests; zero when the retry-after header is absent or unparseable.
 */
public Duration getRetryAfterDuration() {
    long retryIntervalInMilliseconds = 0;
    if (this.responseHeaders != null) {
        String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
        if (StringUtils.isNotEmpty(header)) {
            try {
                retryIntervalInMilliseconds = Long.parseLong(header);
            } catch (NumberFormatException e) {
                // Deliberate best-effort: a malformed header falls back to a zero retry interval.
            }
        }
    }
    return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
 * Gets the response headers as key-value pairs
 *
 * @return the response headers
 */
public Map<String, String> getResponseHeaders() {
    // NOTE(review): returns the internal mutable map directly; callers can mutate it. Consider
    // Collections.unmodifiableMap if mutation is not an intended part of the contract — confirm.
    return this.responseHeaders;
}
/**
 * Gets the resource address associated with this exception.
 *
 * @return the resource address associated with this exception; may be null.
 */
String getResourceAddress() {
    return this.resourceAddress;
}
/**
 * Gets the Cosmos Diagnostic Statistics associated with this exception.
 *
 * @return Cosmos Diagnostic Statistics associated with this exception; may be null when not set.
 */
public CosmosDiagnostics getDiagnostics() {
    return cosmosDiagnostics;
}
// Package-private fluent setter: attaches diagnostics and returns this for chaining.
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
    this.cosmosDiagnostics = cosmosDiagnostics;
    return this;
}
// Resolves the core error message with the following precedence: the CosmosError body's message,
// then its "Errors" property, and finally the message passed to the exception constructor.
@Override
String innerErrorMessage() {
    String innerErrorMessage = super.getMessage();
    if (cosmosError != null) {
        innerErrorMessage = cosmosError.getMessage();
        if (innerErrorMessage == null) {
            // Fall back to the raw "Errors" collection from the error body when no message is set.
            innerErrorMessage = String.valueOf(
                ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
        }
    }
    return innerErrorMessage;
}
private String causeInfo() {
    // Compact "[class, message]" summary of the underlying cause for toString(); null when absent.
    Throwable rootCause = getCause();
    return rootCause == null
        ? null
        : String.format("[class: %s, message: %s]", rootCause.getClass(), rootCause.getMessage());
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
    // A null header map stays null so toString() renders "null" rather than an empty list.
    if (requestHeaders == null) {
        return null;
    }
    // Strip the Authorization header so credentials never leak into logs or toString() output.
    return requestHeaders.entrySet()
        .stream()
        .filter(header -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(header.getKey()))
        .collect(Collectors.toList());
}
// Package-private mutator used to attach the resource address after construction.
void setResourceAddress(String resourceAddress) {
    this.resourceAddress = resourceAddress;
}
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String USER_AGENT = Utils.getUserAgent();
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} |
My initial thought on that is, we don't want our customers to rely on getting the diagnostics string and then logging the exception. It would be much better to have it in all scenarios. For example, below is a very common scenario: ``` try { some operation. } catch (Exception e) { logger.error("Exception occurred", e); } ``` That being said, we can also add this to the diagnostics string. | public String toString() {
return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='" | public String toString() {
return getClass().getSimpleName() + "{" + "userAgent=" + USER_AGENT + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String SDK_VERSION = HttpConstants.Versions.SDK_VERSION;
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String USER_AGENT = Utils.getUserAgent();
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
 * Gets the Cosmos diagnostic statistics associated with this exception.
 *
 * @return the diagnostics, or {@code null} when none have been attached.
 */
public CosmosDiagnostics getDiagnostics() {
    return cosmosDiagnostics;
}
/**
 * Attaches Cosmos diagnostic statistics to this exception.
 *
 * @param cosmosDiagnostics the diagnostics to attach.
 * @return this exception, to allow fluent chaining.
 */
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
    this.cosmosDiagnostics = cosmosDiagnostics;
    return this;
}
@Override
String innerErrorMessage() {
    // Prefer the message carried by the CosmosError payload; fall back to the base
    // exception message when no error resource is attached.
    if (cosmosError == null) {
        return super.getMessage();
    }
    String message = cosmosError.getMessage();
    if (message != null) {
        return message;
    }
    // No top-level message on the error resource: surface its raw "Errors" property instead.
    return String.valueOf(ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
/**
 * Builds a short description of this exception's cause for use in {@code toString()}.
 *
 * @return a string of the form {@code [class: <fqcn>, message: <message>]}, or {@code null}
 * when there is no cause.
 */
private String causeInfo() {
    Throwable cause = getCause();
    if (cause == null) {
        return null;
    }
    // Use getName() instead of the Class object itself: Class.toString() prepends "class ",
    // which previously produced the redundant rendering "[class: class com.example.Foo, ...]".
    return String.format("[class: %s, message: %s]", cause.getClass().getName(), cause.getMessage());
}
/**
 * Filters the given request headers down to entries that are safe for logging by dropping the
 * authorization header (compared case-insensitively), so credentials never leak into
 * {@code toString()} output.
 *
 * @param requestHeaders the raw request headers; may be {@code null}.
 * @return the filtered header entries, or {@code null} when the input map is {@code null}.
 */
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
    if (requestHeaders == null) {
        return null;
    }
    return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
        .collect(Collectors.toList());
}
/**
 * Sets the resource address associated with this exception.
 *
 * @param resourceAddress the address of the resource the request targeted.
 */
void setResourceAddress(String resourceAddress) {
    this.resourceAddress = resourceAddress;
}
} |
diagnostics string should have all the needed info for troubleshooting (including sdk version). However it is good to have it here too. We can consider adding it to troubleshooting string later. | public String toString() {
return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='" | public String toString() {
return getClass().getSimpleName() + "{" + "userAgent=" + USER_AGENT + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String SDK_VERSION = HttpConstants.Versions.SDK_VERSION;
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
/**
 * Creates a new instance of the CosmosException class; all other constructors delegate here.
 *
 * @param statusCode the http status code of the response.
 * @param message the error message.
 * @param responseHeaders the response headers; may be {@code null}.
 * @param cause the underlying cause; may be {@code null}.
 */
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
    super(message, cause);
    this.statusCode = statusCode;
    this.requestTimeline = RequestTimeline.empty();
    // Defensive copy so later mutation of the caller's map cannot change this exception.
    this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
 * Creates a new instance of the CosmosException class.
 *
 * @param message the string message.
 * @param exception the exception object.
 * @param responseHeaders the response headers.
 * @param statusCode the http status code of the response.
 * @param resourceAddress the address of the resource the request is associated with.
 */
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
    this(statusCode, message, responseHeaders, exception);
    this.resourceAddress = resourceAddress;
}
/**
 * Gets the exception message, with the diagnostics string appended when diagnostics
 * have been recorded.
 */
@Override
public String getMessage() {
    String message = innerErrorMessage();
    return cosmosDiagnostics == null ? message : message + ", " + cosmosDiagnostics;
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String USER_AGENT = Utils.getUserAgent();
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} |
Valid point, I can add it to the diagnostics string as well in this PR. Let me see what needs to be done for that. | public String toString() {
return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='" | public String toString() {
return getClass().getSimpleName() + "{" + "userAgent=" + USER_AGENT + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String SDK_VERSION = HttpConstants.Versions.SDK_VERSION;
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String USER_AGENT = Utils.getUserAgent();
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} |
@moderakh @simplynaveen20 - would it make sense to include the complete `user agent string` on diagnostics or just the SDK_VERSION ? | public String toString() {
return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='" | public String toString() {
return getClass().getSimpleName() + "{" + "userAgent=" + USER_AGENT + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String SDK_VERSION = HttpConstants.Versions.SDK_VERSION;
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String USER_AGENT = Utils.getUserAgent();
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} |
Honestly I prefer it to diagnostic , given we print diagnostic on every exception. It look little odd here on exception | public String toString() {
return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | return getClass().getSimpleName() + "{" + "sdkVersion=" + SDK_VERSION + ", error=" + cosmosError + ", resourceAddress='" | public String toString() {
return getClass().getSimpleName() + "{" + "userAgent=" + USER_AGENT + ", error=" + cosmosError + ", resourceAddress='"
+ resourceAddress + '\'' + ", statusCode=" + statusCode + ", message=" + getMessage()
+ ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders="
+ filterSensitiveData(requestHeaders) + '}';
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String SDK_VERSION = HttpConstants.Versions.SDK_VERSION;
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} | class CosmosException extends AzureException {
private static final long serialVersionUID = 1L;
private final static String USER_AGENT = Utils.getUserAgent();
private final int statusCode;
private final Map<String, String> responseHeaders;
private CosmosDiagnostics cosmosDiagnostics;
private final RequestTimeline requestTimeline;
private CosmosError cosmosError;
long lsn;
String partitionKeyRangeId;
Map<String, String> requestHeaders;
Uri requestUri;
String resourceAddress;
protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
super(message, cause);
this.statusCode = statusCode;
this.requestTimeline = RequestTimeline.empty();
this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
*/
CosmosException(int statusCode) {
this(statusCode, null, null, null);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param errorMessage the error message.
*/
protected CosmosException(int statusCode, String errorMessage) {
this(statusCode, errorMessage, null, null);
this.cosmosError = new CosmosError();
ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param innerException the original exception.
*/
protected CosmosException(int statusCode, Exception innerException) {
this(statusCode, null, null, innerException);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
}
/**
* Creates a new instance of the CosmosException class.
*
* @param resourceAddress the address of the resource the request is associated with.
* @param statusCode the http status code of the response.
* @param cosmosErrorResource the error resource object.
* @param responseHeaders the response headers.
*/
protected CosmosException(String resourceAddress,
int statusCode,
CosmosError cosmosErrorResource,
Map<String, String> responseHeaders) {
this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
this.resourceAddress = resourceAddress;
this.cosmosError = cosmosErrorResource;
}
/**
* Creates a new instance of the CosmosException class.
*
* @param message the string message.
* @param statusCode the http status code of the response.
* @param exception the exception object.
* @param responseHeaders the response headers.
* @param resourceAddress the address of the resource the request is associated with.
*/
protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode,
String resourceAddress) {
this(statusCode, message, responseHeaders, exception);
this.resourceAddress = resourceAddress;
}
@Override
public String getMessage() {
if (cosmosDiagnostics == null) {
return innerErrorMessage();
}
return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
}
/**
* Gets the activity ID associated with the request.
*
* @return the activity ID.
*/
public String getActivityId() {
if (this.responseHeaders != null) {
return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
}
return null;
}
/**
* Gets the http status code.
*
* @return the status code.
*/
public int getStatusCode() {
return this.statusCode;
}
/**
* Gets the sub status code.
*
* @return the status code.
*/
public int getSubStatusCode() {
int code = HttpConstants.SubStatusCodes.UNKNOWN;
if (this.responseHeaders != null) {
String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
if (StringUtils.isNotEmpty(subStatusString)) {
try {
code = Integer.parseInt(subStatusString);
} catch (NumberFormatException e) {
}
}
}
return code;
}
/**
* Gets the error code associated with the exception.
*
* @return the error.
*/
CosmosError getError() {
return this.cosmosError;
}
void setError(CosmosError cosmosError) {
this.cosmosError = cosmosError;
}
/**
* Gets the recommended time duration after which the client can retry failed
* requests
*
* @return the recommended time duration after which the client can retry failed
* requests.
*/
public Duration getRetryAfterDuration() {
long retryIntervalInMilliseconds = 0;
if (this.responseHeaders != null) {
String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
if (StringUtils.isNotEmpty(header)) {
try {
retryIntervalInMilliseconds = Long.parseLong(header);
} catch (NumberFormatException e) {
}
}
}
return Duration.ofMillis(retryIntervalInMilliseconds);
}
/**
* Gets the response headers as key-value pairs
*
* @return the response headers
*/
public Map<String, String> getResponseHeaders() {
return this.responseHeaders;
}
/**
* Gets the resource address associated with this exception.
*
* @return the resource address associated with this exception.
*/
String getResourceAddress() {
return this.resourceAddress;
}
/**
* Gets the Cosmos Diagnostic Statistics associated with this exception.
*
* @return Cosmos Diagnostic Statistics associated with this exception.
*/
public CosmosDiagnostics getDiagnostics() {
return cosmosDiagnostics;
}
CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
this.cosmosDiagnostics = cosmosDiagnostics;
return this;
}
@Override
String innerErrorMessage() {
String innerErrorMessage = super.getMessage();
if (cosmosError != null) {
innerErrorMessage = cosmosError.getMessage();
if (innerErrorMessage == null) {
innerErrorMessage = String.valueOf(
ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
}
}
return innerErrorMessage;
}
private String causeInfo() {
Throwable cause = getCause();
if (cause != null) {
return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
}
return null;
}
private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
if (requestHeaders == null) {
return null;
}
return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
.collect(Collectors.toList());
}
void setResourceAddress(String resourceAddress) {
this.resourceAddress = resourceAddress;
}
} |
Is there an Environment.NewLine or equivalent for Java? It might not work correctly on Windows where new line is `\r\n` | public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("userAgent=").append(USER_AGENT).append("\n");
if (this.feedResponseDiagnostics != null) {
stringBuilder.append(feedResponseDiagnostics);
} else {
try {
stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics));
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
}
return stringBuilder.toString();
} | stringBuilder.append("userAgent=").append(USER_AGENT).append("\n"); | public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("userAgent=").append(USER_AGENT).append("\n");
if (this.feedResponseDiagnostics != null) {
stringBuilder.append(feedResponseDiagnostics);
} else {
try {
stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics));
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
}
return stringBuilder.toString();
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final String USER_AGENT = Utils.getUserAgent();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final String USER_AGENT = Utils.getUserAgent();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} |
Good point @j82w, we can use - `System.lineSeparator()` https://docs.oracle.com/javase/8/docs/api/java/lang/System.html#lineSeparator-- I will port that change as a separate PR for everywhere in SDK code. | public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("userAgent=").append(USER_AGENT).append("\n");
if (this.feedResponseDiagnostics != null) {
stringBuilder.append(feedResponseDiagnostics);
} else {
try {
stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics));
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
}
return stringBuilder.toString();
} | stringBuilder.append("userAgent=").append(USER_AGENT).append("\n"); | public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("userAgent=").append(USER_AGENT).append("\n");
if (this.feedResponseDiagnostics != null) {
stringBuilder.append(feedResponseDiagnostics);
} else {
try {
stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics));
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
}
return stringBuilder.toString();
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final String USER_AGENT = Utils.getUserAgent();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final String USER_AGENT = Utils.getUserAgent();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} |
Tracking this issue here : https://github.com/Azure/azure-sdk-for-java/issues/11594 | public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("userAgent=").append(USER_AGENT).append("\n");
if (this.feedResponseDiagnostics != null) {
stringBuilder.append(feedResponseDiagnostics);
} else {
try {
stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics));
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
}
return stringBuilder.toString();
} | stringBuilder.append("userAgent=").append(USER_AGENT).append("\n"); | public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("userAgent=").append(USER_AGENT).append("\n");
if (this.feedResponseDiagnostics != null) {
stringBuilder.append(feedResponseDiagnostics);
} else {
try {
stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics));
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
}
return stringBuilder.toString();
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final String USER_AGENT = Utils.getUserAgent();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final String USER_AGENT = Utils.getUserAgent();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} |
We should also rename the string wordings -> `maxChannelsPerEndpoint` -> `maxConnectionsPerEndpoint` and `maxRequestsPerChannel` -> `maxRequestsPerConnection` | public String toString() {
return "ConnectionPolicy{" +
"requestTimeout=" + requestTimeout +
", connectionMode=" + connectionMode +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", userAgentSuffix='" + userAgentSuffix + '\'' +
", throttlingRetryOptions=" + throttlingRetryOptions +
", endpointDiscoveryEnabled=" + endpointDiscoveryEnabled +
", preferredRegions=" + preferredRegions +
", multipleWriteRegionsEnabled=" + multipleWriteRegionsEnabled +
", inetSocketProxyAddress=" + inetSocketProxyAddress +
", readRequestsFallbackEnabled=" + readRequestsFallbackEnabled +
", connectionTimeout=" + connectionTimeout +
", idleEndpointTimeout=" + idleEndpointTimeout +
", maxChannelsPerEndpoint=" + maxConnectionsPerEndpoint +
", maxRequestsPerChannel=" + maxRequestsPerConnection +
'}';
} | ", maxRequestsPerChannel=" + maxRequestsPerConnection + | public String toString() {
return "ConnectionPolicy{" +
"requestTimeout=" + requestTimeout +
", connectionMode=" + connectionMode +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", userAgentSuffix='" + userAgentSuffix + '\'' +
", throttlingRetryOptions=" + throttlingRetryOptions +
", endpointDiscoveryEnabled=" + endpointDiscoveryEnabled +
", preferredRegions=" + preferredRegions +
", multipleWriteRegionsEnabled=" + multipleWriteRegionsEnabled +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
", readRequestsFallbackEnabled=" + readRequestsFallbackEnabled +
", connectionTimeout=" + connectionTimeout +
", idleEndpointTimeout=" + idleEndpointTimeout +
", maxConnectionsPerEndpoint=" + maxConnectionsPerEndpoint +
", maxRequestsPerConnection=" + maxRequestsPerConnection +
'}';
} | class for
* more details.
*
* @param throttlingRetryOptions the RetryOptions instance.
* @return the ConnectionPolicy.
* @throws IllegalArgumentException thrown if an error occurs
*/
public ConnectionPolicy setThrottlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
if (throttlingRetryOptions == null) {
throw new IllegalArgumentException("retryOptions value must not be null.");
}
this.throttlingRetryOptions = throttlingRetryOptions;
return this;
} | class for
* more details.
*
* @param throttlingRetryOptions the RetryOptions instance.
* @return the ConnectionPolicy.
* @throws IllegalArgumentException thrown if an error occurs
*/
public ConnectionPolicy setThrottlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
if (throttlingRetryOptions == null) {
throw new IllegalArgumentException("retryOptions value must not be null.");
}
this.throttlingRetryOptions = throttlingRetryOptions;
return this;
} |
another good catch. change made on my branch. to be committed with this PR. | public String toString() {
return "ConnectionPolicy{" +
"requestTimeout=" + requestTimeout +
", connectionMode=" + connectionMode +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", userAgentSuffix='" + userAgentSuffix + '\'' +
", throttlingRetryOptions=" + throttlingRetryOptions +
", endpointDiscoveryEnabled=" + endpointDiscoveryEnabled +
", preferredRegions=" + preferredRegions +
", multipleWriteRegionsEnabled=" + multipleWriteRegionsEnabled +
", inetSocketProxyAddress=" + inetSocketProxyAddress +
", readRequestsFallbackEnabled=" + readRequestsFallbackEnabled +
", connectionTimeout=" + connectionTimeout +
", idleEndpointTimeout=" + idleEndpointTimeout +
", maxChannelsPerEndpoint=" + maxConnectionsPerEndpoint +
", maxRequestsPerChannel=" + maxRequestsPerConnection +
'}';
} | ", maxRequestsPerChannel=" + maxRequestsPerConnection + | public String toString() {
return "ConnectionPolicy{" +
"requestTimeout=" + requestTimeout +
", connectionMode=" + connectionMode +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", userAgentSuffix='" + userAgentSuffix + '\'' +
", throttlingRetryOptions=" + throttlingRetryOptions +
", endpointDiscoveryEnabled=" + endpointDiscoveryEnabled +
", preferredRegions=" + preferredRegions +
", multipleWriteRegionsEnabled=" + multipleWriteRegionsEnabled +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
", readRequestsFallbackEnabled=" + readRequestsFallbackEnabled +
", connectionTimeout=" + connectionTimeout +
", idleEndpointTimeout=" + idleEndpointTimeout +
", maxConnectionsPerEndpoint=" + maxConnectionsPerEndpoint +
", maxRequestsPerConnection=" + maxRequestsPerConnection +
'}';
} | class for
* more details.
*
* @param throttlingRetryOptions the RetryOptions instance.
* @return the ConnectionPolicy.
* @throws IllegalArgumentException thrown if an error occurs
*/
public ConnectionPolicy setThrottlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
if (throttlingRetryOptions == null) {
throw new IllegalArgumentException("retryOptions value must not be null.");
}
this.throttlingRetryOptions = throttlingRetryOptions;
return this;
} | class for
* more details.
*
* @param throttlingRetryOptions the RetryOptions instance.
* @return the ConnectionPolicy.
* @throws IllegalArgumentException thrown if an error occurs
*/
public ConnectionPolicy setThrottlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
if (throttlingRetryOptions == null) {
throw new IllegalArgumentException("retryOptions value must not be null.");
}
this.throttlingRetryOptions = throttlingRetryOptions;
return this;
} |
These exceptions should be logged before throwing. | public static JsonElement toGsonElement(JsonNode jsonNode) {
if (jsonNode.isArray()) {
if (jsonNode instanceof GsonJsonArray) {
return ((GsonJsonArray) jsonNode).getJsonArray();
}
throw new IllegalArgumentException("JsonNode is an array but isn't GsonJsonArray.");
} else if (jsonNode.isNull()) {
if (jsonNode instanceof GsonJsonNull) {
return ((GsonJsonNull) jsonNode).getJsonNull();
}
throw new IllegalArgumentException("JsonNode is a null but isn't GsonJsonNull.");
} else if (jsonNode.isObject()) {
if (jsonNode instanceof GsonJsonObject) {
return ((GsonJsonObject) jsonNode).getJsonObject();
}
throw new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject.");
} else if (jsonNode.isValue()) {
if (jsonNode instanceof GsonJsonValue) {
return ((GsonJsonValue) jsonNode).getJsonPrimitive();
}
throw new IllegalArgumentException("JsonNode is a value but isn't GsonJsonValue.");
}
throw new IllegalArgumentException("Unknown JsonNode type.");
} | throw new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject."); | public static JsonElement toGsonElement(JsonNode jsonNode) {
if (jsonNode.isArray()) {
if (jsonNode instanceof GsonJsonArray) {
return ((GsonJsonArray) jsonNode).getJsonArray();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonArray."));
} else if (jsonNode.isNull()) {
if (jsonNode instanceof GsonJsonNull) {
return ((GsonJsonNull) jsonNode).getJsonNull();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a null but isn't GsonJsonNull."));
} else if (jsonNode.isObject()) {
if (jsonNode instanceof GsonJsonObject) {
return ((GsonJsonObject) jsonNode).getJsonObject();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject."));
} else if (jsonNode.isValue()) {
if (jsonNode instanceof GsonJsonPrimitive) {
return ((GsonJsonPrimitive) jsonNode).getJsonPrimitive();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a value but isn't GsonJsonPrimitive."));
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonNode type."));
} | class JsonNodeUtils {
/**
* Converts an Azure Core {@link JsonNode} into a GSON {@link JsonElement}.
*
* @param jsonNode The Azure Core {@link JsonNode}.
* @return The corresponding GSON {@link JsonElement}.
* @throws IllegalArgumentException If the {@link JsonNode} cannot be converted to a {@link JsonElement}.
*/
/**
* Converts an GSON {@link JsonElement} into an Azure Core {@link JsonNode}.
*
* @param jsonElement The GSON {@link JsonElement}.
* @return The corresponding Azure Core {@link JsonNode}.
* @throws IllegalArgumentException If the {@link JsonElement} cannot be converted to a {@link JsonNode}.
*/
public static JsonNode fromGsonElement(JsonElement jsonElement) {
if (jsonElement.isJsonArray()) {
return new GsonJsonArray(jsonElement.getAsJsonArray());
} else if (jsonElement.isJsonNull()) {
return new GsonJsonNull(jsonElement.getAsJsonNull());
} else if (jsonElement.isJsonObject()) {
return new GsonJsonObject(jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonPrimitive()) {
return new GsonJsonValue(jsonElement.getAsJsonPrimitive());
}
throw new IllegalArgumentException("Unknown JsonElement type.");
}
} | class JsonNodeUtils {
private static final ClientLogger LOGGER = new ClientLogger(JsonNodeUtils.class);
/**
* Converts an Azure Core {@link JsonNode} into a GSON {@link JsonElement}.
*
* @param jsonNode The Azure Core {@link JsonNode}.
* @return The corresponding GSON {@link JsonElement}.
* @throws IllegalArgumentException If the {@link JsonNode} cannot be converted to a {@link JsonElement}.
*/
/**
* Converts an GSON {@link JsonElement} into an Azure Core {@link JsonNode}.
*
* @param jsonElement The GSON {@link JsonElement}.
* @return The corresponding Azure Core {@link JsonNode}.
* @throws IllegalArgumentException If the {@link JsonElement} cannot be converted to a {@link JsonNode}.
*/
public static JsonNode fromGsonElement(JsonElement jsonElement) {
if (jsonElement.isJsonArray()) {
return new GsonJsonArray(jsonElement.getAsJsonArray());
} else if (jsonElement.isJsonNull()) {
return GsonJsonNull.INSTANCE;
} else if (jsonElement.isJsonObject()) {
return new GsonJsonObject(jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonPrimitive()) {
return new GsonJsonPrimitive(jsonElement.getAsJsonPrimitive());
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonElement type."));
}
} |
Add some tests to include UTF-8 chars. | private static Stream<Arguments> toGsonElementSupplier() {
JsonArray jsonArray = new JsonArray();
JsonNull jsonNull = JsonNull.INSTANCE;
JsonObject jsonObject = new JsonObject();
JsonPrimitive booleanNode = new JsonPrimitive(true);
JsonPrimitive doubleNode = new JsonPrimitive(42D);
JsonPrimitive floatNode = new JsonPrimitive(42F);
JsonPrimitive intNode = new JsonPrimitive(42);
JsonPrimitive longNode = new JsonPrimitive(42L);
JsonPrimitive shortNode = new JsonPrimitive((short) 42);
JsonPrimitive textNode = new JsonPrimitive("42");
return Stream.of(
Arguments.of(new GsonJsonArray(jsonArray), jsonArray),
Arguments.of(new GsonJsonArray(), jsonArray),
Arguments.of(new GsonJsonNull(jsonNull), jsonNull),
Arguments.of(new GsonJsonNull(), jsonNull),
Arguments.of(new GsonJsonObject(jsonObject), jsonObject),
Arguments.of(new GsonJsonObject(), jsonObject),
Arguments.of(new GsonJsonValue(booleanNode), booleanNode),
Arguments.of(new GsonJsonValue(true), booleanNode),
Arguments.of(new GsonJsonValue(doubleNode), doubleNode),
Arguments.of(new GsonJsonValue(42D), doubleNode),
Arguments.of(new GsonJsonValue(floatNode), floatNode),
Arguments.of(new GsonJsonValue(42F), floatNode),
Arguments.of(new GsonJsonValue(intNode), intNode),
Arguments.of(new GsonJsonValue(42), intNode),
Arguments.of(new GsonJsonValue(longNode), longNode),
Arguments.of(new GsonJsonValue(42L), longNode),
Arguments.of(new GsonJsonValue(shortNode), shortNode),
Arguments.of(new GsonJsonValue((short) 42), shortNode),
Arguments.of(new GsonJsonValue(textNode), textNode),
Arguments.of(new GsonJsonValue("42"), textNode)
);
} | Arguments.of(new GsonJsonValue("42"), textNode) | private static Stream<Arguments> toGsonElementSupplier() {
JsonArray jsonArray = new JsonArray();
JsonNull jsonNull = JsonNull.INSTANCE;
JsonObject jsonObject = new JsonObject();
JsonPrimitive booleanNode = new JsonPrimitive(true);
JsonPrimitive doubleNode = new JsonPrimitive(42D);
JsonPrimitive floatNode = new JsonPrimitive(42F);
JsonPrimitive intNode = new JsonPrimitive(42);
JsonPrimitive longNode = new JsonPrimitive(42L);
JsonPrimitive shortNode = new JsonPrimitive((short) 42);
JsonPrimitive textNode = new JsonPrimitive("42");
JsonPrimitive utf8TextNode = new JsonPrimitive("\uD83D\uDE03");
return Stream.of(
Arguments.of(new GsonJsonArray(jsonArray), jsonArray),
Arguments.of(new GsonJsonArray(), jsonArray),
Arguments.of(GsonJsonNull.INSTANCE, jsonNull),
Arguments.of(new GsonJsonObject(jsonObject), jsonObject),
Arguments.of(new GsonJsonObject(), jsonObject),
Arguments.of(new GsonJsonPrimitive(booleanNode), booleanNode),
Arguments.of(new GsonJsonPrimitive(true), booleanNode),
Arguments.of(new GsonJsonPrimitive(doubleNode), doubleNode),
Arguments.of(new GsonJsonPrimitive(42D), doubleNode),
Arguments.of(new GsonJsonPrimitive(floatNode), floatNode),
Arguments.of(new GsonJsonPrimitive(42F), floatNode),
Arguments.of(new GsonJsonPrimitive(intNode), intNode),
Arguments.of(new GsonJsonPrimitive(42), intNode),
Arguments.of(new GsonJsonPrimitive(longNode), longNode),
Arguments.of(new GsonJsonPrimitive(42L), longNode),
Arguments.of(new GsonJsonPrimitive(shortNode), shortNode),
Arguments.of(new GsonJsonPrimitive((short) 42), shortNode),
Arguments.of(new GsonJsonPrimitive(textNode), textNode),
Arguments.of(new GsonJsonPrimitive("42"), textNode),
Arguments.of(new GsonJsonPrimitive(utf8TextNode), utf8TextNode),
Arguments.of(new GsonJsonPrimitive("\uD83D\uDE03"), utf8TextNode)
);
} | class JsonNodeUtilsTests {
@AfterEach
public void cleanupInlineMocks() {
Mockito.framework().clearInlineMocks();
}
@ParameterizedTest
@MethodSource("toGsonElementSupplier")
public void toGsonElement(JsonNode jsonNode, JsonElement expected) {
assertEquals(expected, JsonNodeUtils.toGsonElement(jsonNode));
}
@ParameterizedTest
@MethodSource("invalidToGsonElementSupplier")
public void invalidToGsonElement(JsonNode jsonNode) {
assertThrows(IllegalArgumentException.class, () -> JsonNodeUtils.toGsonElement(jsonNode));
}
private static Stream<Arguments> invalidToGsonElementSupplier() {
JsonNode unknownNode = mock(JsonNode.class);
when(unknownNode.isArray()).thenReturn(false);
when(unknownNode.isNull()).thenReturn(false);
when(unknownNode.isObject()).thenReturn(false);
when(unknownNode.isValue()).thenReturn(false);
JsonNode unexpectedArray = mock(JsonNode.class);
when(unexpectedArray.isArray()).thenReturn(true);
when(unexpectedArray.isNull()).thenReturn(false);
when(unexpectedArray.isObject()).thenReturn(false);
when(unexpectedArray.isValue()).thenReturn(false);
JsonNode unexpectedNull = mock(JsonNode.class);
when(unexpectedNull.isArray()).thenReturn(false);
when(unexpectedNull.isNull()).thenReturn(true);
when(unexpectedNull.isObject()).thenReturn(false);
when(unexpectedNull.isValue()).thenReturn(false);
JsonNode unexpectedObject = mock(JsonNode.class);
when(unexpectedObject.isArray()).thenReturn(false);
when(unexpectedObject.isNull()).thenReturn(false);
when(unexpectedObject.isObject()).thenReturn(true);
when(unexpectedObject.isValue()).thenReturn(false);
JsonNode unexpectedValue = mock(JsonNode.class);
when(unexpectedValue.isArray()).thenReturn(false);
when(unexpectedValue.isNull()).thenReturn(false);
when(unexpectedValue.isObject()).thenReturn(false);
when(unexpectedValue.isValue()).thenReturn(true);
return Stream.of(
Arguments.of(unknownNode),
Arguments.of(unexpectedArray),
Arguments.of(unexpectedNull),
Arguments.of(unexpectedObject),
Arguments.of(unexpectedValue)
);
}
@ParameterizedTest
@MethodSource("fromGsonElementSupplier")
public void fromGsonElement(JsonElement jsonElement, JsonNode expected) {
assertEquals(expected, JsonNodeUtils.fromGsonElement(jsonElement));
}
private static Stream<Arguments> fromGsonElementSupplier() {
JsonArray jsonArray = new JsonArray();
JsonNull jsonNull = JsonNull.INSTANCE;
JsonObject jsonObject = new JsonObject();
JsonPrimitive booleanNode = new JsonPrimitive(true);
JsonPrimitive doubleNode = new JsonPrimitive(42D);
JsonPrimitive floatNode = new JsonPrimitive(42F);
JsonPrimitive intNode = new JsonPrimitive(42);
JsonPrimitive longNode = new JsonPrimitive(42L);
JsonPrimitive shortNode = new JsonPrimitive((short) 42);
JsonPrimitive textNode = new JsonPrimitive("42");
return Stream.of(
Arguments.of(jsonArray, new GsonJsonArray(jsonArray)),
Arguments.of(jsonNull, new GsonJsonNull(jsonNull)),
Arguments.of(jsonObject, new GsonJsonObject(jsonObject)),
Arguments.of(booleanNode, new GsonJsonValue(booleanNode)),
Arguments.of(doubleNode, new GsonJsonValue(doubleNode)),
Arguments.of(floatNode, new GsonJsonValue(floatNode)),
Arguments.of(intNode, new GsonJsonValue(intNode)),
Arguments.of(longNode, new GsonJsonValue(longNode)),
Arguments.of(shortNode, new GsonJsonValue(shortNode)),
Arguments.of(textNode, new GsonJsonValue(textNode))
);
}
@Test
public void invalidFromGsonElement() {
JsonElement unknownNode = mock(JsonElement.class);
when(unknownNode.isJsonArray()).thenReturn(false);
when(unknownNode.isJsonNull()).thenReturn(false);
when(unknownNode.isJsonObject()).thenReturn(false);
when(unknownNode.isJsonPrimitive()).thenReturn(false);
assertThrows(IllegalArgumentException.class, () -> JsonNodeUtils.fromGsonElement(unknownNode));
}
} | class JsonNodeUtilsTests {
@AfterEach
public void cleanupInlineMocks() {
Mockito.framework().clearInlineMocks();
}
@ParameterizedTest
@MethodSource("toGsonElementSupplier")
public void toGsonElement(JsonNode jsonNode, JsonElement expected) {
assertEquals(expected, JsonNodeUtils.toGsonElement(jsonNode));
}
@ParameterizedTest
@MethodSource("invalidToGsonElementSupplier")
public void invalidToGsonElement(JsonNode jsonNode) {
assertThrows(IllegalArgumentException.class, () -> JsonNodeUtils.toGsonElement(jsonNode));
}
private static Stream<Arguments> invalidToGsonElementSupplier() {
JsonNode unknownNode = mock(JsonNode.class);
when(unknownNode.isArray()).thenReturn(false);
when(unknownNode.isNull()).thenReturn(false);
when(unknownNode.isObject()).thenReturn(false);
when(unknownNode.isValue()).thenReturn(false);
JsonNode unexpectedArray = mock(JsonNode.class);
when(unexpectedArray.isArray()).thenReturn(true);
when(unexpectedArray.isNull()).thenReturn(false);
when(unexpectedArray.isObject()).thenReturn(false);
when(unexpectedArray.isValue()).thenReturn(false);
JsonNode unexpectedNull = mock(JsonNode.class);
when(unexpectedNull.isArray()).thenReturn(false);
when(unexpectedNull.isNull()).thenReturn(true);
when(unexpectedNull.isObject()).thenReturn(false);
when(unexpectedNull.isValue()).thenReturn(false);
JsonNode unexpectedObject = mock(JsonNode.class);
when(unexpectedObject.isArray()).thenReturn(false);
when(unexpectedObject.isNull()).thenReturn(false);
when(unexpectedObject.isObject()).thenReturn(true);
when(unexpectedObject.isValue()).thenReturn(false);
JsonNode unexpectedValue = mock(JsonNode.class);
when(unexpectedValue.isArray()).thenReturn(false);
when(unexpectedValue.isNull()).thenReturn(false);
when(unexpectedValue.isObject()).thenReturn(false);
when(unexpectedValue.isValue()).thenReturn(true);
return Stream.of(
Arguments.of(unknownNode),
Arguments.of(unexpectedArray),
Arguments.of(unexpectedNull),
Arguments.of(unexpectedObject),
Arguments.of(unexpectedValue)
);
}
@ParameterizedTest
@MethodSource("fromGsonElementSupplier")
public void fromGsonElement(JsonElement jsonElement, JsonNode expected) {
assertEquals(expected, JsonNodeUtils.fromGsonElement(jsonElement));
}
private static Stream<Arguments> fromGsonElementSupplier() {
JsonArray jsonArray = new JsonArray();
JsonNull jsonNull = JsonNull.INSTANCE;
JsonObject jsonObject = new JsonObject();
JsonPrimitive booleanNode = new JsonPrimitive(true);
JsonPrimitive doubleNode = new JsonPrimitive(42D);
JsonPrimitive floatNode = new JsonPrimitive(42F);
JsonPrimitive intNode = new JsonPrimitive(42);
JsonPrimitive longNode = new JsonPrimitive(42L);
JsonPrimitive shortNode = new JsonPrimitive((short) 42);
JsonPrimitive textNode = new JsonPrimitive("42");
JsonPrimitive utf8TextNode = new JsonPrimitive("\uD83D\uDE03");
return Stream.of(
Arguments.of(jsonArray, new GsonJsonArray(jsonArray)),
Arguments.of(jsonNull, GsonJsonNull.INSTANCE),
Arguments.of(jsonObject, new GsonJsonObject(jsonObject)),
Arguments.of(booleanNode, new GsonJsonPrimitive(booleanNode)),
Arguments.of(doubleNode, new GsonJsonPrimitive(doubleNode)),
Arguments.of(floatNode, new GsonJsonPrimitive(floatNode)),
Arguments.of(intNode, new GsonJsonPrimitive(intNode)),
Arguments.of(longNode, new GsonJsonPrimitive(longNode)),
Arguments.of(shortNode, new GsonJsonPrimitive(shortNode)),
Arguments.of(textNode, new GsonJsonPrimitive(textNode)),
Arguments.of(utf8TextNode, new GsonJsonPrimitive(utf8TextNode))
);
}
@Test
public void invalidFromGsonElement() {
JsonElement unknownNode = mock(JsonElement.class);
when(unknownNode.isJsonArray()).thenReturn(false);
when(unknownNode.isJsonNull()).thenReturn(false);
when(unknownNode.isJsonObject()).thenReturn(false);
when(unknownNode.isJsonPrimitive()).thenReturn(false);
assertThrows(IllegalArgumentException.class, () -> JsonNodeUtils.fromGsonElement(unknownNode));
}
} |
Will add logging before throwing. | public static JsonElement toGsonElement(JsonNode jsonNode) {
if (jsonNode.isArray()) {
if (jsonNode instanceof GsonJsonArray) {
return ((GsonJsonArray) jsonNode).getJsonArray();
}
throw new IllegalArgumentException("JsonNode is an array but isn't GsonJsonArray.");
} else if (jsonNode.isNull()) {
if (jsonNode instanceof GsonJsonNull) {
return ((GsonJsonNull) jsonNode).getJsonNull();
}
throw new IllegalArgumentException("JsonNode is a null but isn't GsonJsonNull.");
} else if (jsonNode.isObject()) {
if (jsonNode instanceof GsonJsonObject) {
return ((GsonJsonObject) jsonNode).getJsonObject();
}
throw new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject.");
} else if (jsonNode.isValue()) {
if (jsonNode instanceof GsonJsonValue) {
return ((GsonJsonValue) jsonNode).getJsonPrimitive();
}
throw new IllegalArgumentException("JsonNode is a value but isn't GsonJsonValue.");
}
throw new IllegalArgumentException("Unknown JsonNode type.");
} | throw new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject."); | public static JsonElement toGsonElement(JsonNode jsonNode) {
if (jsonNode.isArray()) {
if (jsonNode instanceof GsonJsonArray) {
return ((GsonJsonArray) jsonNode).getJsonArray();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonArray."));
} else if (jsonNode.isNull()) {
if (jsonNode instanceof GsonJsonNull) {
return ((GsonJsonNull) jsonNode).getJsonNull();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a null but isn't GsonJsonNull."));
} else if (jsonNode.isObject()) {
if (jsonNode instanceof GsonJsonObject) {
return ((GsonJsonObject) jsonNode).getJsonObject();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject."));
} else if (jsonNode.isValue()) {
if (jsonNode instanceof GsonJsonPrimitive) {
return ((GsonJsonPrimitive) jsonNode).getJsonPrimitive();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a value but isn't GsonJsonPrimitive."));
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonNode type."));
} | class JsonNodeUtils {
/**
* Converts an Azure Core {@link JsonNode} into a GSON {@link JsonElement}.
*
* @param jsonNode The Azure Core {@link JsonNode}.
* @return The corresponding GSON {@link JsonElement}.
* @throws IllegalArgumentException If the {@link JsonNode} cannot be converted to a {@link JsonElement}.
*/
/**
* Converts an GSON {@link JsonElement} into an Azure Core {@link JsonNode}.
*
* @param jsonElement The GSON {@link JsonElement}.
* @return The corresponding Azure Core {@link JsonNode}.
* @throws IllegalArgumentException If the {@link JsonElement} cannot be converted to a {@link JsonNode}.
*/
public static JsonNode fromGsonElement(JsonElement jsonElement) {
if (jsonElement.isJsonArray()) {
return new GsonJsonArray(jsonElement.getAsJsonArray());
} else if (jsonElement.isJsonNull()) {
return new GsonJsonNull(jsonElement.getAsJsonNull());
} else if (jsonElement.isJsonObject()) {
return new GsonJsonObject(jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonPrimitive()) {
return new GsonJsonValue(jsonElement.getAsJsonPrimitive());
}
throw new IllegalArgumentException("Unknown JsonElement type.");
}
} | class JsonNodeUtils {
private static final ClientLogger LOGGER = new ClientLogger(JsonNodeUtils.class);
/**
* Converts an Azure Core {@link JsonNode} into a GSON {@link JsonElement}.
*
* @param jsonNode The Azure Core {@link JsonNode}.
* @return The corresponding GSON {@link JsonElement}.
* @throws IllegalArgumentException If the {@link JsonNode} cannot be converted to a {@link JsonElement}.
*/
/**
* Converts an GSON {@link JsonElement} into an Azure Core {@link JsonNode}.
*
* @param jsonElement The GSON {@link JsonElement}.
* @return The corresponding Azure Core {@link JsonNode}.
* @throws IllegalArgumentException If the {@link JsonElement} cannot be converted to a {@link JsonNode}.
*/
public static JsonNode fromGsonElement(JsonElement jsonElement) {
if (jsonElement.isJsonArray()) {
return new GsonJsonArray(jsonElement.getAsJsonArray());
} else if (jsonElement.isJsonNull()) {
return GsonJsonNull.INSTANCE;
} else if (jsonElement.isJsonObject()) {
return new GsonJsonObject(jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonPrimitive()) {
return new GsonJsonPrimitive(jsonElement.getAsJsonPrimitive());
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonElement type."));
}
} |
Added | private static Stream<Arguments> toGsonElementSupplier() {
JsonArray jsonArray = new JsonArray();
JsonNull jsonNull = JsonNull.INSTANCE;
JsonObject jsonObject = new JsonObject();
JsonPrimitive booleanNode = new JsonPrimitive(true);
JsonPrimitive doubleNode = new JsonPrimitive(42D);
JsonPrimitive floatNode = new JsonPrimitive(42F);
JsonPrimitive intNode = new JsonPrimitive(42);
JsonPrimitive longNode = new JsonPrimitive(42L);
JsonPrimitive shortNode = new JsonPrimitive((short) 42);
JsonPrimitive textNode = new JsonPrimitive("42");
return Stream.of(
Arguments.of(new GsonJsonArray(jsonArray), jsonArray),
Arguments.of(new GsonJsonArray(), jsonArray),
Arguments.of(new GsonJsonNull(jsonNull), jsonNull),
Arguments.of(new GsonJsonNull(), jsonNull),
Arguments.of(new GsonJsonObject(jsonObject), jsonObject),
Arguments.of(new GsonJsonObject(), jsonObject),
Arguments.of(new GsonJsonValue(booleanNode), booleanNode),
Arguments.of(new GsonJsonValue(true), booleanNode),
Arguments.of(new GsonJsonValue(doubleNode), doubleNode),
Arguments.of(new GsonJsonValue(42D), doubleNode),
Arguments.of(new GsonJsonValue(floatNode), floatNode),
Arguments.of(new GsonJsonValue(42F), floatNode),
Arguments.of(new GsonJsonValue(intNode), intNode),
Arguments.of(new GsonJsonValue(42), intNode),
Arguments.of(new GsonJsonValue(longNode), longNode),
Arguments.of(new GsonJsonValue(42L), longNode),
Arguments.of(new GsonJsonValue(shortNode), shortNode),
Arguments.of(new GsonJsonValue((short) 42), shortNode),
Arguments.of(new GsonJsonValue(textNode), textNode),
Arguments.of(new GsonJsonValue("42"), textNode)
);
} | Arguments.of(new GsonJsonValue("42"), textNode) | private static Stream<Arguments> toGsonElementSupplier() {
JsonArray jsonArray = new JsonArray();
JsonNull jsonNull = JsonNull.INSTANCE;
JsonObject jsonObject = new JsonObject();
JsonPrimitive booleanNode = new JsonPrimitive(true);
JsonPrimitive doubleNode = new JsonPrimitive(42D);
JsonPrimitive floatNode = new JsonPrimitive(42F);
JsonPrimitive intNode = new JsonPrimitive(42);
JsonPrimitive longNode = new JsonPrimitive(42L);
JsonPrimitive shortNode = new JsonPrimitive((short) 42);
JsonPrimitive textNode = new JsonPrimitive("42");
JsonPrimitive utf8TextNode = new JsonPrimitive("\uD83D\uDE03");
return Stream.of(
Arguments.of(new GsonJsonArray(jsonArray), jsonArray),
Arguments.of(new GsonJsonArray(), jsonArray),
Arguments.of(GsonJsonNull.INSTANCE, jsonNull),
Arguments.of(new GsonJsonObject(jsonObject), jsonObject),
Arguments.of(new GsonJsonObject(), jsonObject),
Arguments.of(new GsonJsonPrimitive(booleanNode), booleanNode),
Arguments.of(new GsonJsonPrimitive(true), booleanNode),
Arguments.of(new GsonJsonPrimitive(doubleNode), doubleNode),
Arguments.of(new GsonJsonPrimitive(42D), doubleNode),
Arguments.of(new GsonJsonPrimitive(floatNode), floatNode),
Arguments.of(new GsonJsonPrimitive(42F), floatNode),
Arguments.of(new GsonJsonPrimitive(intNode), intNode),
Arguments.of(new GsonJsonPrimitive(42), intNode),
Arguments.of(new GsonJsonPrimitive(longNode), longNode),
Arguments.of(new GsonJsonPrimitive(42L), longNode),
Arguments.of(new GsonJsonPrimitive(shortNode), shortNode),
Arguments.of(new GsonJsonPrimitive((short) 42), shortNode),
Arguments.of(new GsonJsonPrimitive(textNode), textNode),
Arguments.of(new GsonJsonPrimitive("42"), textNode),
Arguments.of(new GsonJsonPrimitive(utf8TextNode), utf8TextNode),
Arguments.of(new GsonJsonPrimitive("\uD83D\uDE03"), utf8TextNode)
);
} | class JsonNodeUtilsTests {
@AfterEach
public void cleanupInlineMocks() {
Mockito.framework().clearInlineMocks();
}
@ParameterizedTest
@MethodSource("toGsonElementSupplier")
public void toGsonElement(JsonNode jsonNode, JsonElement expected) {
assertEquals(expected, JsonNodeUtils.toGsonElement(jsonNode));
}
@ParameterizedTest
@MethodSource("invalidToGsonElementSupplier")
public void invalidToGsonElement(JsonNode jsonNode) {
assertThrows(IllegalArgumentException.class, () -> JsonNodeUtils.toGsonElement(jsonNode));
}
private static Stream<Arguments> invalidToGsonElementSupplier() {
JsonNode unknownNode = mock(JsonNode.class);
when(unknownNode.isArray()).thenReturn(false);
when(unknownNode.isNull()).thenReturn(false);
when(unknownNode.isObject()).thenReturn(false);
when(unknownNode.isValue()).thenReturn(false);
JsonNode unexpectedArray = mock(JsonNode.class);
when(unexpectedArray.isArray()).thenReturn(true);
when(unexpectedArray.isNull()).thenReturn(false);
when(unexpectedArray.isObject()).thenReturn(false);
when(unexpectedArray.isValue()).thenReturn(false);
JsonNode unexpectedNull = mock(JsonNode.class);
when(unexpectedNull.isArray()).thenReturn(false);
when(unexpectedNull.isNull()).thenReturn(true);
when(unexpectedNull.isObject()).thenReturn(false);
when(unexpectedNull.isValue()).thenReturn(false);
JsonNode unexpectedObject = mock(JsonNode.class);
when(unexpectedObject.isArray()).thenReturn(false);
when(unexpectedObject.isNull()).thenReturn(false);
when(unexpectedObject.isObject()).thenReturn(true);
when(unexpectedObject.isValue()).thenReturn(false);
JsonNode unexpectedValue = mock(JsonNode.class);
when(unexpectedValue.isArray()).thenReturn(false);
when(unexpectedValue.isNull()).thenReturn(false);
when(unexpectedValue.isObject()).thenReturn(false);
when(unexpectedValue.isValue()).thenReturn(true);
return Stream.of(
Arguments.of(unknownNode),
Arguments.of(unexpectedArray),
Arguments.of(unexpectedNull),
Arguments.of(unexpectedObject),
Arguments.of(unexpectedValue)
);
}
@ParameterizedTest
@MethodSource("fromGsonElementSupplier")
public void fromGsonElement(JsonElement jsonElement, JsonNode expected) {
assertEquals(expected, JsonNodeUtils.fromGsonElement(jsonElement));
}
private static Stream<Arguments> fromGsonElementSupplier() {
JsonArray jsonArray = new JsonArray();
JsonNull jsonNull = JsonNull.INSTANCE;
JsonObject jsonObject = new JsonObject();
JsonPrimitive booleanNode = new JsonPrimitive(true);
JsonPrimitive doubleNode = new JsonPrimitive(42D);
JsonPrimitive floatNode = new JsonPrimitive(42F);
JsonPrimitive intNode = new JsonPrimitive(42);
JsonPrimitive longNode = new JsonPrimitive(42L);
JsonPrimitive shortNode = new JsonPrimitive((short) 42);
JsonPrimitive textNode = new JsonPrimitive("42");
return Stream.of(
Arguments.of(jsonArray, new GsonJsonArray(jsonArray)),
Arguments.of(jsonNull, new GsonJsonNull(jsonNull)),
Arguments.of(jsonObject, new GsonJsonObject(jsonObject)),
Arguments.of(booleanNode, new GsonJsonValue(booleanNode)),
Arguments.of(doubleNode, new GsonJsonValue(doubleNode)),
Arguments.of(floatNode, new GsonJsonValue(floatNode)),
Arguments.of(intNode, new GsonJsonValue(intNode)),
Arguments.of(longNode, new GsonJsonValue(longNode)),
Arguments.of(shortNode, new GsonJsonValue(shortNode)),
Arguments.of(textNode, new GsonJsonValue(textNode))
);
}
@Test
public void invalidFromGsonElement() {
JsonElement unknownNode = mock(JsonElement.class);
when(unknownNode.isJsonArray()).thenReturn(false);
when(unknownNode.isJsonNull()).thenReturn(false);
when(unknownNode.isJsonObject()).thenReturn(false);
when(unknownNode.isJsonPrimitive()).thenReturn(false);
assertThrows(IllegalArgumentException.class, () -> JsonNodeUtils.fromGsonElement(unknownNode));
}
} | class JsonNodeUtilsTests {
@AfterEach
public void cleanupInlineMocks() {
Mockito.framework().clearInlineMocks();
}
@ParameterizedTest
@MethodSource("toGsonElementSupplier")
public void toGsonElement(JsonNode jsonNode, JsonElement expected) {
assertEquals(expected, JsonNodeUtils.toGsonElement(jsonNode));
}
@ParameterizedTest
@MethodSource("invalidToGsonElementSupplier")
public void invalidToGsonElement(JsonNode jsonNode) {
assertThrows(IllegalArgumentException.class, () -> JsonNodeUtils.toGsonElement(jsonNode));
}
private static Stream<Arguments> invalidToGsonElementSupplier() {
JsonNode unknownNode = mock(JsonNode.class);
when(unknownNode.isArray()).thenReturn(false);
when(unknownNode.isNull()).thenReturn(false);
when(unknownNode.isObject()).thenReturn(false);
when(unknownNode.isValue()).thenReturn(false);
JsonNode unexpectedArray = mock(JsonNode.class);
when(unexpectedArray.isArray()).thenReturn(true);
when(unexpectedArray.isNull()).thenReturn(false);
when(unexpectedArray.isObject()).thenReturn(false);
when(unexpectedArray.isValue()).thenReturn(false);
JsonNode unexpectedNull = mock(JsonNode.class);
when(unexpectedNull.isArray()).thenReturn(false);
when(unexpectedNull.isNull()).thenReturn(true);
when(unexpectedNull.isObject()).thenReturn(false);
when(unexpectedNull.isValue()).thenReturn(false);
JsonNode unexpectedObject = mock(JsonNode.class);
when(unexpectedObject.isArray()).thenReturn(false);
when(unexpectedObject.isNull()).thenReturn(false);
when(unexpectedObject.isObject()).thenReturn(true);
when(unexpectedObject.isValue()).thenReturn(false);
JsonNode unexpectedValue = mock(JsonNode.class);
when(unexpectedValue.isArray()).thenReturn(false);
when(unexpectedValue.isNull()).thenReturn(false);
when(unexpectedValue.isObject()).thenReturn(false);
when(unexpectedValue.isValue()).thenReturn(true);
return Stream.of(
Arguments.of(unknownNode),
Arguments.of(unexpectedArray),
Arguments.of(unexpectedNull),
Arguments.of(unexpectedObject),
Arguments.of(unexpectedValue)
);
}
@ParameterizedTest
@MethodSource("fromGsonElementSupplier")
public void fromGsonElement(JsonElement jsonElement, JsonNode expected) {
assertEquals(expected, JsonNodeUtils.fromGsonElement(jsonElement));
}
private static Stream<Arguments> fromGsonElementSupplier() {
JsonArray jsonArray = new JsonArray();
JsonNull jsonNull = JsonNull.INSTANCE;
JsonObject jsonObject = new JsonObject();
JsonPrimitive booleanNode = new JsonPrimitive(true);
JsonPrimitive doubleNode = new JsonPrimitive(42D);
JsonPrimitive floatNode = new JsonPrimitive(42F);
JsonPrimitive intNode = new JsonPrimitive(42);
JsonPrimitive longNode = new JsonPrimitive(42L);
JsonPrimitive shortNode = new JsonPrimitive((short) 42);
JsonPrimitive textNode = new JsonPrimitive("42");
JsonPrimitive utf8TextNode = new JsonPrimitive("\uD83D\uDE03");
return Stream.of(
Arguments.of(jsonArray, new GsonJsonArray(jsonArray)),
Arguments.of(jsonNull, GsonJsonNull.INSTANCE),
Arguments.of(jsonObject, new GsonJsonObject(jsonObject)),
Arguments.of(booleanNode, new GsonJsonPrimitive(booleanNode)),
Arguments.of(doubleNode, new GsonJsonPrimitive(doubleNode)),
Arguments.of(floatNode, new GsonJsonPrimitive(floatNode)),
Arguments.of(intNode, new GsonJsonPrimitive(intNode)),
Arguments.of(longNode, new GsonJsonPrimitive(longNode)),
Arguments.of(shortNode, new GsonJsonPrimitive(shortNode)),
Arguments.of(textNode, new GsonJsonPrimitive(textNode)),
Arguments.of(utf8TextNode, new GsonJsonPrimitive(utf8TextNode))
);
}
@Test
public void invalidFromGsonElement() {
JsonElement unknownNode = mock(JsonElement.class);
when(unknownNode.isJsonArray()).thenReturn(false);
when(unknownNode.isJsonNull()).thenReturn(false);
when(unknownNode.isJsonObject()).thenReturn(false);
when(unknownNode.isJsonPrimitive()).thenReturn(false);
assertThrows(IllegalArgumentException.class, () -> JsonNodeUtils.fromGsonElement(unknownNode));
}
} |
nit; GsonJsonValue -> GsonJsonPrimitive | public static JsonElement toGsonElement(JsonNode jsonNode) {
if (jsonNode.isArray()) {
if (jsonNode instanceof GsonJsonArray) {
return ((GsonJsonArray) jsonNode).getJsonArray();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonArray."));
} else if (jsonNode.isNull()) {
if (jsonNode instanceof GsonJsonNull) {
return ((GsonJsonNull) jsonNode).getJsonNull();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a null but isn't GsonJsonNull."));
} else if (jsonNode.isObject()) {
if (jsonNode instanceof GsonJsonObject) {
return ((GsonJsonObject) jsonNode).getJsonObject();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject."));
} else if (jsonNode.isValue()) {
if (jsonNode instanceof GsonJsonPrimitive) {
return ((GsonJsonPrimitive) jsonNode).getJsonPrimitive();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a value but isn't GsonJsonValue."));
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonNode type."));
} | new IllegalArgumentException("JsonNode is a value but isn't GsonJsonValue.")); | public static JsonElement toGsonElement(JsonNode jsonNode) {
if (jsonNode.isArray()) {
if (jsonNode instanceof GsonJsonArray) {
return ((GsonJsonArray) jsonNode).getJsonArray();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonArray."));
} else if (jsonNode.isNull()) {
if (jsonNode instanceof GsonJsonNull) {
return ((GsonJsonNull) jsonNode).getJsonNull();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a null but isn't GsonJsonNull."));
} else if (jsonNode.isObject()) {
if (jsonNode instanceof GsonJsonObject) {
return ((GsonJsonObject) jsonNode).getJsonObject();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject."));
} else if (jsonNode.isValue()) {
if (jsonNode instanceof GsonJsonPrimitive) {
return ((GsonJsonPrimitive) jsonNode).getJsonPrimitive();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a value but isn't GsonJsonPrimitive."));
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonNode type."));
} | class JsonNodeUtils {
private static final ClientLogger LOGGER = new ClientLogger(JsonNodeUtils.class);
/**
* Converts an Azure Core {@link JsonNode} into a GSON {@link JsonElement}.
*
* @param jsonNode The Azure Core {@link JsonNode}.
* @return The corresponding GSON {@link JsonElement}.
* @throws IllegalArgumentException If the {@link JsonNode} cannot be converted to a {@link JsonElement}.
*/
/**
* Converts an GSON {@link JsonElement} into an Azure Core {@link JsonNode}.
*
* @param jsonElement The GSON {@link JsonElement}.
* @return The corresponding Azure Core {@link JsonNode}.
* @throws IllegalArgumentException If the {@link JsonElement} cannot be converted to a {@link JsonNode}.
*/
public static JsonNode fromGsonElement(JsonElement jsonElement) {
if (jsonElement.isJsonArray()) {
return new GsonJsonArray(jsonElement.getAsJsonArray());
} else if (jsonElement.isJsonNull()) {
return GsonJsonNull.INSTANCE;
} else if (jsonElement.isJsonObject()) {
return new GsonJsonObject(jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonPrimitive()) {
return new GsonJsonPrimitive(jsonElement.getAsJsonPrimitive());
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonElement type."));
}
} | class JsonNodeUtils {
private static final ClientLogger LOGGER = new ClientLogger(JsonNodeUtils.class);
/**
* Converts an Azure Core {@link JsonNode} into a GSON {@link JsonElement}.
*
* @param jsonNode The Azure Core {@link JsonNode}.
* @return The corresponding GSON {@link JsonElement}.
* @throws IllegalArgumentException If the {@link JsonNode} cannot be converted to a {@link JsonElement}.
*/
/**
* Converts an GSON {@link JsonElement} into an Azure Core {@link JsonNode}.
*
* @param jsonElement The GSON {@link JsonElement}.
* @return The corresponding Azure Core {@link JsonNode}.
* @throws IllegalArgumentException If the {@link JsonElement} cannot be converted to a {@link JsonNode}.
*/
public static JsonNode fromGsonElement(JsonElement jsonElement) {
if (jsonElement.isJsonArray()) {
return new GsonJsonArray(jsonElement.getAsJsonArray());
} else if (jsonElement.isJsonNull()) {
return GsonJsonNull.INSTANCE;
} else if (jsonElement.isJsonObject()) {
return new GsonJsonObject(jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonPrimitive()) {
return new GsonJsonPrimitive(jsonElement.getAsJsonPrimitive());
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonElement type."));
}
} |
Good catch | public static JsonElement toGsonElement(JsonNode jsonNode) {
if (jsonNode.isArray()) {
if (jsonNode instanceof GsonJsonArray) {
return ((GsonJsonArray) jsonNode).getJsonArray();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonArray."));
} else if (jsonNode.isNull()) {
if (jsonNode instanceof GsonJsonNull) {
return ((GsonJsonNull) jsonNode).getJsonNull();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a null but isn't GsonJsonNull."));
} else if (jsonNode.isObject()) {
if (jsonNode instanceof GsonJsonObject) {
return ((GsonJsonObject) jsonNode).getJsonObject();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject."));
} else if (jsonNode.isValue()) {
if (jsonNode instanceof GsonJsonPrimitive) {
return ((GsonJsonPrimitive) jsonNode).getJsonPrimitive();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a value but isn't GsonJsonValue."));
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonNode type."));
} | new IllegalArgumentException("JsonNode is a value but isn't GsonJsonValue.")); | public static JsonElement toGsonElement(JsonNode jsonNode) {
if (jsonNode.isArray()) {
if (jsonNode instanceof GsonJsonArray) {
return ((GsonJsonArray) jsonNode).getJsonArray();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonArray."));
} else if (jsonNode.isNull()) {
if (jsonNode instanceof GsonJsonNull) {
return ((GsonJsonNull) jsonNode).getJsonNull();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a null but isn't GsonJsonNull."));
} else if (jsonNode.isObject()) {
if (jsonNode instanceof GsonJsonObject) {
return ((GsonJsonObject) jsonNode).getJsonObject();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is an array but isn't GsonJsonObject."));
} else if (jsonNode.isValue()) {
if (jsonNode instanceof GsonJsonPrimitive) {
return ((GsonJsonPrimitive) jsonNode).getJsonPrimitive();
}
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("JsonNode is a value but isn't GsonJsonPrimitive."));
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonNode type."));
} | class JsonNodeUtils {
private static final ClientLogger LOGGER = new ClientLogger(JsonNodeUtils.class);
/**
* Converts an Azure Core {@link JsonNode} into a GSON {@link JsonElement}.
*
* @param jsonNode The Azure Core {@link JsonNode}.
* @return The corresponding GSON {@link JsonElement}.
* @throws IllegalArgumentException If the {@link JsonNode} cannot be converted to a {@link JsonElement}.
*/
/**
* Converts an GSON {@link JsonElement} into an Azure Core {@link JsonNode}.
*
* @param jsonElement The GSON {@link JsonElement}.
* @return The corresponding Azure Core {@link JsonNode}.
* @throws IllegalArgumentException If the {@link JsonElement} cannot be converted to a {@link JsonNode}.
*/
public static JsonNode fromGsonElement(JsonElement jsonElement) {
if (jsonElement.isJsonArray()) {
return new GsonJsonArray(jsonElement.getAsJsonArray());
} else if (jsonElement.isJsonNull()) {
return GsonJsonNull.INSTANCE;
} else if (jsonElement.isJsonObject()) {
return new GsonJsonObject(jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonPrimitive()) {
return new GsonJsonPrimitive(jsonElement.getAsJsonPrimitive());
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonElement type."));
}
} | class JsonNodeUtils {
private static final ClientLogger LOGGER = new ClientLogger(JsonNodeUtils.class);
/**
* Converts an Azure Core {@link JsonNode} into a GSON {@link JsonElement}.
*
* @param jsonNode The Azure Core {@link JsonNode}.
* @return The corresponding GSON {@link JsonElement}.
* @throws IllegalArgumentException If the {@link JsonNode} cannot be converted to a {@link JsonElement}.
*/
/**
* Converts an GSON {@link JsonElement} into an Azure Core {@link JsonNode}.
*
* @param jsonElement The GSON {@link JsonElement}.
* @return The corresponding Azure Core {@link JsonNode}.
* @throws IllegalArgumentException If the {@link JsonElement} cannot be converted to a {@link JsonNode}.
*/
public static JsonNode fromGsonElement(JsonElement jsonElement) {
if (jsonElement.isJsonArray()) {
return new GsonJsonArray(jsonElement.getAsJsonArray());
} else if (jsonElement.isJsonNull()) {
return GsonJsonNull.INSTANCE;
} else if (jsonElement.isJsonObject()) {
return new GsonJsonObject(jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonPrimitive()) {
return new GsonJsonPrimitive(jsonElement.getAsJsonPrimitive());
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown JsonElement type."));
}
} |
Ok, `com.google.gson.JsonArray::get` return `JsonNull.INSTANCE` for null value. In azure-core `JacksonJsonArray::has` we use native Jackson::JsonArray::[has](https://fasterxml.github.io/jackson-databind/javadoc/2.6/com/fasterxml/jackson/databind/JsonNode.html#has-int-)(int). As per doc that native method return `true` even if the stored value is `null`. So our impls has the same behavior, which is good. | public boolean has(int index) {
if (index < 0 || index >= jsonArray.size()) {
return false;
}
return jsonArray.get(index) != null;
} | return jsonArray.get(index) != null; | public boolean has(int index) {
if (index < 0 || index >= jsonArray.size()) {
return false;
}
return jsonArray.get(index) != null;
} | class GsonJsonArray implements JsonArray {
private final ClientLogger logger = new ClientLogger(GsonJsonArray.class);
private final com.google.gson.JsonArray jsonArray;
/**
* Constructs a {@link JsonArray} backed by an empty GSON {@link com.google.gson.JsonArray}.
*/
public GsonJsonArray() {
this.jsonArray = new com.google.gson.JsonArray();
}
/**
* Constructs a {@link JsonArray} backed by the passed GSON {@link com.google.gson.JsonArray}.
*
* @param jsonArray The backing GSON {@link com.google.gson.JsonArray}.
* @throws NullPointerException If {@code jsonArray} is {@code null}.
*/
public GsonJsonArray(com.google.gson.JsonArray jsonArray) {
this.jsonArray = Objects.requireNonNull(jsonArray, "'jsonArray' cannot be null.");
}
com.google.gson.JsonArray getJsonArray() {
return jsonArray;
}
@Override
public JsonArray add(JsonNode jsonNode) {
jsonArray.add(JsonNodeUtils.toGsonElement(jsonNode));
return this;
}
@Override
public JsonArray clear() {
int size = jsonArray.size();
for (int i = 0; i < size; i++) {
jsonArray.remove(0);
}
return this;
}
@Override
public Stream<JsonNode> elements() {
Spliterator<JsonElement> spliterator = Spliterators
.spliteratorUnknownSize(jsonArray.iterator(), Spliterator.ORDERED);
return StreamSupport.stream(spliterator, false).map(JsonNodeUtils::fromGsonElement);
}
@Override
public JsonNode get(int index) {
checkBounds(index);
JsonElement jsonElement = jsonArray.get(index);
return (jsonElement == null) ? null : JsonNodeUtils.fromGsonElement(jsonElement);
}
@Override
@Override
public JsonNode remove(int index) {
checkBounds(index);
JsonElement jsonElement = jsonArray.remove(index);
return (jsonElement == null) ? null : JsonNodeUtils.fromGsonElement(jsonElement);
}
@Override
public JsonNode set(int index, JsonNode jsonNode) {
checkBounds(index);
JsonElement jsonElement = jsonArray.set(index, JsonNodeUtils.toGsonElement(jsonNode));
return (jsonElement == null) ? null : JsonNodeUtils.fromGsonElement(jsonElement);
}
@Override
public int size() {
return jsonArray.size();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof GsonJsonArray)) {
return false;
}
return Objects.equals(jsonArray, ((GsonJsonArray) obj).jsonArray);
}
@Override
public int hashCode() {
return jsonArray.hashCode();
}
private void checkBounds(int index) {
if (index < 0 || index >= size()) {
throw logger.logExceptionAsError(new IndexOutOfBoundsException("'index' must be between 0 and size()."));
}
}
} | class GsonJsonArray implements JsonArray {
private final ClientLogger logger = new ClientLogger(GsonJsonArray.class);
private final com.google.gson.JsonArray jsonArray;
/**
* Constructs a {@link JsonArray} backed by an empty GSON {@link com.google.gson.JsonArray}.
*/
public GsonJsonArray() {
this.jsonArray = new com.google.gson.JsonArray();
}
/**
* Constructs a {@link JsonArray} backed by the passed GSON {@link com.google.gson.JsonArray}.
*
* @param jsonArray The backing GSON {@link com.google.gson.JsonArray}.
* @throws NullPointerException If {@code jsonArray} is {@code null}.
*/
public GsonJsonArray(com.google.gson.JsonArray jsonArray) {
this.jsonArray = Objects.requireNonNull(jsonArray, "'jsonArray' cannot be null.");
}
com.google.gson.JsonArray getJsonArray() {
return jsonArray;
}
@Override
public JsonArray add(JsonNode jsonNode) {
jsonArray.add(JsonNodeUtils.toGsonElement(jsonNode));
return this;
}
@Override
public JsonArray clear() {
int size = jsonArray.size();
for (int i = 0; i < size; i++) {
jsonArray.remove(0);
}
return this;
}
@Override
public Stream<JsonNode> elements() {
Spliterator<JsonElement> spliterator = Spliterators
.spliteratorUnknownSize(jsonArray.iterator(), Spliterator.ORDERED);
return StreamSupport.stream(spliterator, false).map(JsonNodeUtils::fromGsonElement);
}
@Override
public JsonNode get(int index) {
checkBounds(index);
JsonElement jsonElement = jsonArray.get(index);
return (jsonElement == null) ? null : JsonNodeUtils.fromGsonElement(jsonElement);
}
@Override
@Override
public JsonNode remove(int index) {
checkBounds(index);
JsonElement jsonElement = jsonArray.remove(index);
return (jsonElement == null) ? null : JsonNodeUtils.fromGsonElement(jsonElement);
}
@Override
public JsonNode set(int index, JsonNode jsonNode) {
checkBounds(index);
JsonElement jsonElement = jsonArray.set(index, JsonNodeUtils.toGsonElement(jsonNode));
return (jsonElement == null) ? null : JsonNodeUtils.fromGsonElement(jsonElement);
}
@Override
public int size() {
return jsonArray.size();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof GsonJsonArray)) {
return false;
}
return Objects.equals(jsonArray, ((GsonJsonArray) obj).jsonArray);
}
@Override
public int hashCode() {
return jsonArray.hashCode();
}
private void checkBounds(int index) {
if (index < 0 || index >= size()) {
throw logger.logExceptionAsError(new IndexOutOfBoundsException("'index' must be between 0 and size()."));
}
}
} |
Should we use our "utils" for that? | public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
try {
Objects.requireNonNull(options);
final Map<String, String> metadataFinal = options.getMetadata() == null
? new HashMap<>() : options.getMetadata();
options.setMetadata(metadataFinal);
Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer(
options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE)
: options.getDataFlux();
Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal);
return dataFinal.flatMap(df -> super.uploadWithResponse(new BlobParallelUploadOptions(df)
.setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
.setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier())
.setRequestConditions(options.getRequestConditions()).setTimeout(options.getTimeout())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
} | Objects.requireNonNull(options); | new BlobParallelUploadOptions(df)
.setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
.setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier())
.setRequestConditions(options.getRequestConditions())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
} | class EncryptedBlobAsyncClient extends BlobAsyncClient {
static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB;
private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB;
private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class);
/**
* An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption.
*/
private final AsyncKeyEncryptionKey keyWrapper;
/**
* A {@link String} that is used to wrap/unwrap the content key during encryption.
*/
private final String keyWrapAlgorithm;
/**
* Package-private constructor for use by {@link EncryptedBlobClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param containerName The container name.
* @param blobName The blob name.
* @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
* @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
* @param key The key used to encrypt and decrypt data.
* @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption.
* @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version.
*/
EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) {
super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
null, versionId);
this.keyWrapper = key;
this.keyWrapAlgorithm = keyWrapAlgorithm;
}
/**
* Creates a new block blob. By default this method will not overwrite an existing blob.
* <p>
* Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported
* with this method; the content of the existing blob is overwritten with the new content. To perform a partial
* update of block blob's, use {@link BlockBlobAsyncClient
* BlockBlobAsyncClient
* <a href="https:
* <a href="https:
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) {
    // Non-destructive by default: delegate with overwrite disabled so an existing blob is preserved.
    final boolean overwrite = false;
    try {
        return upload(data, parallelTransferOptions, overwrite);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob.
* <p>
* Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported
* with this method; the content of the existing blob is overwritten with the new content. To perform a partial
* update of block blob's, use {@link BlockBlobAsyncClient
* BlockBlobAsyncClient
* <a href="https:
* <a href="https:
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param overwrite Whether or not to overwrite, should data exist on the blob.
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions,
    boolean overwrite) {
    try {
        // The upload itself is lazy; nothing is sent until the returned Mono is subscribed.
        Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null,
            null).flatMap(FluxUtil::toMono);
        if (overwrite) {
            return uploadTask;
        }
        // Non-overwrite path: only proceed when the blob does not already exist.
        return exists().flatMap(alreadyExists -> {
            if (alreadyExists) {
                return monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
            }
            return uploadTask;
        });
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
* overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of
* the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link
* BlockBlobAsyncClient
* {@link BlockBlobAsyncClient
 * see the <a href="https://docs.microsoft.com/rest/api/storageservices/put-block">Azure
 * Docs for Put Block</a> and the <a href="https://docs.microsoft.com/rest/api/storageservices/put-block-list">Azure
 * Docs for Put Block List</a>.
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link BlobHttpHeaders}
* @param metadata Metadata to associate with the blob.
* @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data,
    ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata,
    AccessTier tier, BlobRequestConditions requestConditions) {
    // Forward every argument to the options-based overload. BUG FIX: the tier was previously
    // hard-coded to AccessTier.HOT, silently discarding the caller-supplied tier; pass 'tier'
    // through instead (consistent with uploadFromFile's 6-arg overload below).
    return this.uploadWithResponse(new BlobParallelUploadOptions(data)
        .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
        .setTier(tier).setRequestConditions(requestConditions));
}
/**
* Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
* overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of
* the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link
* BlockBlobAsyncClient
* {@link BlockBlobAsyncClient
* see the <a href="https:
* Docs for Put Block</a> and the <a href="https:
* Docs for Put Block List</a>.
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse
*
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param options {@link BlobParallelUploadOptions}
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
try {
Objects.requireNonNull(options);
final Map<String, String> metadataFinal = options.getMetadata() == null
? new HashMap<>() : options.getMetadata();
options.setMetadata(metadataFinal);
Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer(
options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE)
: options.getDataFlux();
Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal);
return dataFinal.flatMap(df -> super.uploadWithResponse(
}
/**
* Creates a new block blob with the content of the specified file. By default this method will not overwrite
* existing data
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @return An empty response
*/
@Override
public Mono<Void> uploadFromFile(String filePath) {
    // Non-destructive by default: never overwrite an existing blob.
    final boolean overwrite = false;
    try {
        return uploadFromFile(filePath, overwrite);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param overwrite Whether or not to overwrite should data exist on the blob.
* @return An empty response
*/
@Override
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) {
    try {
        // Deferred work: nothing is read or sent until the returned Mono is subscribed.
        Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null);
        if (overwrite) {
            return uploadTask;
        }
        // Guard the non-overwrite path: fail when the blob is already present.
        return exists().flatMap(alreadyExists -> {
            if (alreadyExists) {
                return monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
            }
            return uploadTask;
        });
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file.
* @param headers {@link BlobHttpHeaders}
* @param metadata Metadata to associate with the blob.
* @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return An empty response
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier,
    BlobRequestConditions requestConditions) {
    // Bundle the legacy argument list into the options object, then discard the response body.
    BlobUploadFromFileOptions options = new BlobUploadFromFileOptions(filePath)
        .setParallelTransferOptions(parallelTransferOptions)
        .setHeaders(headers)
        .setMetadata(metadata)
        .setTier(tier)
        .setRequestConditions(requestConditions);
    return this.uploadFromFileWithResponse(options).then();
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse
*
* @param options {@link BlobUploadFromFileOptions}
* @return A reactive response containing the information of the uploaded block blob.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) {
    try {
        StorageImplUtils.assertNotNull("options", options);
        // Mono.using ties the file channel's lifetime to the subscription: the supplier opens
        // the channel, and uploadFileCleanup runs when the sequence terminates or is cancelled.
        return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger),
            channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel))
                .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
                .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier())
                .setRequestConditions(options.getRequestConditions()))
                // Close the channel as soon as the upload completes or errors, rather than
                // waiting for the cleanup callback.
                .doOnTerminate(() -> {
                    try {
                        channel.close();
                    } catch (IOException e) {
                        throw logger.logExceptionAsError(new UncheckedIOException(e));
                    }
                }), channel -> UploadUtils.uploadFileCleanup(channel, logger));
    } catch (RuntimeException ex) {
        // Synchronous failures (e.g. the file cannot be opened) surface reactively.
        return monoError(logger, ex);
    }
}
/**
* Encrypts the given Flux ByteBuffer.
*
* @param plainTextFlux The Flux ByteBuffer to be encrypted.
*
* @return A {@link EncryptedBlob}
*
* @throws InvalidKeyException If the key provided is invalid
*/
Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException {
    Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null");
    try {
        // Generate a fresh AES-256 content-encryption key for this blob.
        KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES);
        keyGen.init(256);
        Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING);
        SecretKey aesKey = keyGen.generateKey();
        // No explicit IV is supplied; the cipher's generated IV is captured below via getIV().
        cipher.init(Cipher.ENCRYPT_MODE, aesKey);
        Map<String, String> keyWrappingMetadata = new HashMap<>();
        keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY,
            CryptographyConstants.AGENT_METADATA_VALUE);
        // Wrap (encrypt) the content key with the customer's key-encryption key.
        return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded())
            .map(encryptedKey -> {
                // NOTE(review): getKeyId().block() is a blocking call inside a reactive
                // operator — confirm this is acceptable on the subscribing scheduler.
                WrappedKey wrappedKey = new WrappedKey(
                    this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm);
                // Assemble the metadata blob that lets a decrypting client unwrap the key
                // and re-derive the cipher parameters.
                EncryptionData encryptionData = new EncryptionData()
                    .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE)
                    .setEncryptionAgent(
                        new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1,
                            EncryptionAlgorithm.AES_CBC_256))
                    .setKeyWrappingMetadata(keyWrappingMetadata)
                    .setContentEncryptionIV(cipher.getIV())
                    .setWrappedContentKey(wrappedKey);
                // Encrypt each incoming buffer incrementally with cipher.update().
                Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> {
                    int outputSize = cipher.getOutputSize(plainTextBuffer.remaining());
                    /*
                    This should be the only place we allocate memory in encryptBlob(). Although there is an
                    overload that can encrypt in place that would save allocations, we do not want to overwrite
                    customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable,
                    we should implement pooling.
                    */
                    ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize);
                    int encryptedBytes;
                    try {
                        encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer);
                    } catch (ShortBufferException e) {
                        throw logger.logExceptionAsError(Exceptions.propagate(e));
                    }
                    // Rewind and clamp the buffer to exactly the bytes the cipher produced.
                    encryptedTextBuffer.position(0);
                    encryptedTextBuffer.limit(encryptedBytes);
                    return encryptedTextBuffer;
                });
                /*
                Defer() ensures the contained code is not executed until the Flux is subscribed to, in
                other words, cipher.doFinal() will not be called until the plainTextFlux has completed
                and therefore all other data has been encrypted.
                */
                encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> {
                    try {
                        return Flux.just(ByteBuffer.wrap(cipher.doFinal()));
                    } catch (GeneralSecurityException e) {
                        throw logger.logExceptionAsError(Exceptions.propagate(e));
                    }
                }));
                return new EncryptedBlob(encryptionData, encryptedTextFlux);
            });
    } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
        // Indicates a broken JCE configuration; not recoverable by the caller.
        throw logger.logExceptionAsError(new RuntimeException(e));
    }
}
/**
* Encrypt the blob and add the encryption metadata to the customer's metadata.
*
* @param plainText The data to encrypt
* @param metadata The customer's metadata to be updated.
*
* @return A Mono containing the cipher text
*/
private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText,
    Map<String, String> metadata) {
    try {
        // Encrypt the payload, then record the serialized encryption metadata into the
        // caller's metadata map before handing back the ciphertext stream.
        return this.encryptBlob(plainText).flatMap(encryptedBlob -> {
            String encryptionDataJson;
            try {
                encryptionDataJson = encryptedBlob.getEncryptionData().toJsonString();
            } catch (JsonProcessingException e) {
                throw logger.logExceptionAsError(Exceptions.propagate(e));
            }
            metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptionDataJson);
            return Mono.just(encryptedBlob.getCiphertextFlux());
        });
    } catch (InvalidKeyException e) {
        throw logger.logExceptionAsError(Exceptions.propagate(e));
    }
}
/**
* Unsupported. Cannot query data encrypted on client side.
*/
@Override
public Flux<ByteBuffer> query(String expression) {
    // Server-side query cannot operate on client-side-encrypted content; always reject.
    UnsupportedOperationException unsupported =
        new UnsupportedOperationException("Cannot query data encrypted on client side");
    throw logger.logExceptionAsError(unsupported);
}
/**
* Unsupported. Cannot query data encrypted on client side.
*/
@Override
public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) {
    // Server-side query cannot operate on client-side-encrypted content; always reject.
    UnsupportedOperationException unsupported =
        new UnsupportedOperationException("Cannot query data encrypted on client side");
    throw logger.logExceptionAsError(unsupported);
}
} | class EncryptedBlobAsyncClient extends BlobAsyncClient {
// Default chunk size used when converting an InputStream source into a ByteBuffer flux.
static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB;
// Service-imposed ceiling on a single staged block.
private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB;
private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class);
/**
 * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption.
 */
private final AsyncKeyEncryptionKey keyWrapper;
/**
 * A {@link String} that is used to wrap/unwrap the content key during encryption.
 */
private final String keyWrapAlgorithm;
/**
* Package-private constructor for use by {@link EncryptedBlobClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param containerName The container name.
* @param blobName The blob name.
* @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
* @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
* @param key The key used to encrypt and decrypt data.
* @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption.
* @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version.
*/
EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
    String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
    AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) {
    // NOTE(review): the extra null argument presumably corresponds to an encryption scope
    // on the BlobAsyncClient constructor — confirm against the superclass signature.
    super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
        null, versionId);
    this.keyWrapper = key;
    this.keyWrapAlgorithm = keyWrapAlgorithm;
}
/**
* Creates a new block blob. By default this method will not overwrite an existing blob.
* <p>
* Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported
* with this method; the content of the existing blob is overwritten with the new content. To perform a partial
* update of block blob's, use {@link BlockBlobAsyncClient
* BlockBlobAsyncClient
* <a href="https:
* <a href="https:
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) {
    // Non-destructive by default: delegate with overwrite disabled so an existing blob is preserved.
    final boolean overwrite = false;
    try {
        return upload(data, parallelTransferOptions, overwrite);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob.
* <p>
* Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported
* with this method; the content of the existing blob is overwritten with the new content. To perform a partial
* update of block blob's, use {@link BlockBlobAsyncClient
* BlockBlobAsyncClient
* <a href="https:
* <a href="https:
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param overwrite Whether or not to overwrite, should data exist on the blob.
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions,
    boolean overwrite) {
    try {
        // The upload itself is lazy; nothing is sent until the returned Mono is subscribed.
        Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null,
            null).flatMap(FluxUtil::toMono);
        if (overwrite) {
            return uploadTask;
        }
        // Non-overwrite path: only proceed when the blob does not already exist.
        return exists().flatMap(alreadyExists -> {
            if (alreadyExists) {
                return monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
            }
            return uploadTask;
        });
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
* overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of
* the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link
* BlockBlobAsyncClient
* {@link BlockBlobAsyncClient
 * see the <a href="https://docs.microsoft.com/rest/api/storageservices/put-block">Azure
 * Docs for Put Block</a> and the <a href="https://docs.microsoft.com/rest/api/storageservices/put-block-list">Azure
 * Docs for Put Block List</a>.
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse
*
* @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link BlobHttpHeaders}
* @param metadata Metadata to associate with the blob.
* @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data,
    ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata,
    AccessTier tier, BlobRequestConditions requestConditions) {
    // Forward every argument to the options-based overload. BUG FIX: the tier was previously
    // hard-coded to AccessTier.HOT, silently discarding the caller-supplied tier; pass 'tier'
    // through instead (consistent with uploadFromFile's 6-arg overload below).
    return this.uploadWithResponse(new BlobParallelUploadOptions(data)
        .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
        .setTier(tier).setRequestConditions(requestConditions));
}
/**
* Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
* overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of
* the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link
* BlockBlobAsyncClient
* {@link BlockBlobAsyncClient
* see the <a href="https:
* Docs for Put Block</a> and the <a href="https:
* Docs for Put Block List</a>.
* <p>
* The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
* retries are enabled, and the length of the data need not be known in advance. Therefore, this method should
* support uploading any arbitrary data source, including network streams. This behavior is possible because this
* method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
* this method may offer additional convenience, it will not be as performant as other options, which should be
* preferred when possible.
* <p>
* Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
* data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
* trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
* for a given scenario.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse
*
* {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
* to produce the same values across subscriptions.
* @param options {@link BlobParallelUploadOptions}
* @return A reactive response containing the information of the uploaded block blob.
*/
@Override
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
try {
StorageImplUtils.assertNotNull("options", options);
final Map<String, String> metadataFinal = options.getMetadata() == null
? new HashMap<>() : options.getMetadata();
Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer(
options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE)
: options.getDataFlux();
Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal);
return dataFinal.flatMap(df -> super.uploadWithResponse(
}
/**
* Creates a new block blob with the content of the specified file. By default this method will not overwrite
* existing data
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @return An empty response
*/
@Override
public Mono<Void> uploadFromFile(String filePath) {
    // Non-destructive by default: never overwrite an existing blob.
    final boolean overwrite = false;
    try {
        return uploadFromFile(filePath, overwrite);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param overwrite Whether or not to overwrite should data exist on the blob.
* @return An empty response
*/
@Override
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) {
    try {
        // Deferred work: nothing is read or sent until the returned Mono is subscribed.
        Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null);
        if (overwrite) {
            return uploadTask;
        }
        // Guard the non-overwrite path: fail when the blob is already present.
        return exists().flatMap(alreadyExists -> {
            if (alreadyExists) {
                return monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
            }
            return uploadTask;
        });
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile
*
* @param filePath Path to the upload file
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file.
* @param headers {@link BlobHttpHeaders}
* @param metadata Metadata to associate with the blob.
* @param tier {@link AccessTier} for the destination blob.
* @param requestConditions {@link BlobRequestConditions}
* @return An empty response
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier,
    BlobRequestConditions requestConditions) {
    // Bundle the legacy argument list into the options object, then discard the response body.
    BlobUploadFromFileOptions options = new BlobUploadFromFileOptions(filePath)
        .setParallelTransferOptions(parallelTransferOptions)
        .setHeaders(headers)
        .setMetadata(metadata)
        .setTier(tier)
        .setRequestConditions(requestConditions);
    return this.uploadFromFileWithResponse(options).then();
}
/**
* Creates a new block blob, or updates the content of an existing block blob, with the content of the specified
* file.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse
*
* @param options {@link BlobUploadFromFileOptions}
* @return A reactive response containing the information of the uploaded block blob.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB
* @throws UncheckedIOException If an I/O error occurs
*/
@Override
public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) {
    try {
        StorageImplUtils.assertNotNull("options", options);
        // Mono.using ties the file channel's lifetime to the subscription: the supplier opens
        // the channel, and uploadFileCleanup runs when the sequence terminates or is cancelled.
        return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger),
            channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel))
                .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders())
                .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier())
                .setRequestConditions(options.getRequestConditions()))
                // Close the channel as soon as the upload completes or errors, rather than
                // waiting for the cleanup callback.
                .doOnTerminate(() -> {
                    try {
                        channel.close();
                    } catch (IOException e) {
                        throw logger.logExceptionAsError(new UncheckedIOException(e));
                    }
                }), channel -> UploadUtils.uploadFileCleanup(channel, logger));
    } catch (RuntimeException ex) {
        // Synchronous failures (e.g. the file cannot be opened) surface reactively.
        return monoError(logger, ex);
    }
}
/**
 * Encrypts the given Flux ByteBuffer.
 *
 * @param plainTextFlux The Flux ByteBuffer to be encrypted.
 *
 * @return A {@link EncryptedBlob}
 *
 * @throws InvalidKeyException If the key provided is invalid
 */
Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException {
    Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null");
    try {
        // Generate a fresh 256-bit AES content-encryption key (CEK) for this blob and
        // initialize an AES/CBC/PKCS5Padding cipher with it; the cipher's IV is recorded
        // in the encryption metadata below via cipher.getIV().
        KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES);
        keyGen.init(256);
        Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING);
        SecretKey aesKey = keyGen.generateKey();
        cipher.init(Cipher.ENCRYPT_MODE, aesKey);
        Map<String, String> keyWrappingMetadata = new HashMap<>();
        keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY,
            CryptographyConstants.AGENT_METADATA_VALUE);
        // Wrap (encrypt) the CEK with the customer's key-wrapping key so it can be stored
        // next to the blob inside the encryption metadata.
        return this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded())
            .map(encryptedKey -> {
                // NOTE(review): getKeyId().block() blocks inside a reactive callback --
                // confirm this cannot stall/deadlock the subscribing scheduler.
                WrappedKey wrappedKey = new WrappedKey(
                    this.keyWrapper.getKeyId().block(), encryptedKey, keyWrapAlgorithm);
                EncryptionData encryptionData = new EncryptionData()
                    .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE)
                    .setEncryptionAgent(
                        new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1,
                            EncryptionAlgorithm.AES_CBC_256))
                    .setKeyWrappingMetadata(keyWrappingMetadata)
                    .setContentEncryptionIV(cipher.getIV())
                    .setWrappedContentKey(wrappedKey);
                // The Cipher instance is stateful and shared by every emission below, so the
                // returned flux is presumably only safe to subscribe to once -- TODO confirm.
                Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> {
                    int outputSize = cipher.getOutputSize(plainTextBuffer.remaining());
                    /*
                    This should be the only place we allocate memory in encryptBlob(). Although there is an
                    overload that can encrypt in place that would save allocations, we do not want to overwrite
                    customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable,
                    we should implement pooling.
                    */
                    ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize);
                    int encryptedBytes;
                    try {
                        encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer);
                    } catch (ShortBufferException e) {
                        throw logger.logExceptionAsError(Exceptions.propagate(e));
                    }
                    // cipher.update advanced the output buffer's position; rewind and cap the
                    // limit so downstream consumers read exactly the bytes written.
                    encryptedTextBuffer.position(0);
                    encryptedTextBuffer.limit(encryptedBytes);
                    return encryptedTextBuffer;
                });
                /*
                Defer() ensures the contained code is not executed until the Flux is subscribed to, in
                other words, cipher.doFinal() will not be called until the plainTextFlux has completed
                and therefore all other data has been encrypted.
                */
                encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> {
                    try {
                        return Flux.just(ByteBuffer.wrap(cipher.doFinal()));
                    } catch (GeneralSecurityException e) {
                        throw logger.logExceptionAsError(Exceptions.propagate(e));
                    }
                }));
                return new EncryptedBlob(encryptionData, encryptedTextFlux);
            });
    } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
        // AES and AES/CBC/PKCS5Padding are mandatory JCA algorithms, so this is unexpected;
        // wrap rather than propagate checked exceptions from this internal method.
        throw logger.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Encrypt the blob and add the encryption metadata to the customer's metadata.
 *
 * @param plainText The data to encrypt
 * @param metadata The customer's metadata to be updated.
 *
 * @return A Mono containing the cipher text
 */
private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText,
    Map<String, String> metadata) {
    try {
        return this.encryptBlob(plainText).flatMap(encryptedBlob -> {
            // Serialize the encryption metadata and record it under the well-known key
            // before handing back the ciphertext stream.
            String encryptionDataJson;
            try {
                encryptionDataJson = encryptedBlob.getEncryptionData().toJsonString();
            } catch (JsonProcessingException e) {
                throw logger.logExceptionAsError(Exceptions.propagate(e));
            }
            metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptionDataJson);
            return Mono.just(encryptedBlob.getCiphertextFlux());
        });
    } catch (InvalidKeyException e) {
        throw logger.logExceptionAsError(Exceptions.propagate(e));
    }
}
/**
 * Unsupported. Cannot query data encrypted on client side.
 *
 * @param expression ignored; queries are never supported on client-side encrypted blobs.
 * @throws UnsupportedOperationException always.
 */
@Override
public Flux<ByteBuffer> query(String expression) {
    UnsupportedOperationException unsupported =
        new UnsupportedOperationException("Cannot query data encrypted on client side");
    throw logger.logExceptionAsError(unsupported);
}
/**
 * Unsupported. Cannot query data encrypted on client side.
 *
 * @param queryOptions ignored; queries are never supported on client-side encrypted blobs.
 * @throws UnsupportedOperationException always.
 */
@Override
public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) {
    UnsupportedOperationException unsupported =
        new UnsupportedOperationException("Cannot query data encrypted on client side");
    throw logger.logExceptionAsError(unsupported);
}
} |
Thanks for getting this. I thought the information was getting rather verbose. | public void subscribe(CoreSubscriber<? super T> actual) {
if (isDisposed()) {
if (lastError != null) {
actual.onSubscribe(Operators.emptySubscription());
actual.onError(lastError);
} else {
Operators.error(actual, logger.logExceptionAsError(new IllegalStateException(
String.format("namespace[%s] entityPath[%s]: Cannot subscribe. Processor is already terminated.",
fullyQualifiedNamespace, entityPath))));
}
return;
}
final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this);
actual.onSubscribe(subscriber);
synchronized (lock) {
if (currentChannel != null) {
subscriber.complete(currentChannel);
return;
}
}
subscribers.add(subscriber);
logger.verbose("Added a subscriber {} to AMQP channel processor. Total "
+ "subscribers = {}", subscriber, subscribers.size());
if (!isRetryPending.get()) {
requestUpstream();
}
} | logger.verbose("Added a subscriber {} to AMQP channel processor. Total " | public void subscribe(CoreSubscriber<? super T> actual) {
if (isDisposed()) {
if (lastError != null) {
actual.onSubscribe(Operators.emptySubscription());
actual.onError(lastError);
} else {
Operators.error(actual, logger.logExceptionAsError(new IllegalStateException(
String.format("namespace[%s] entityPath[%s]: Cannot subscribe. Processor is already terminated.",
fullyQualifiedNamespace, entityPath))));
}
return;
}
final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this);
actual.onSubscribe(subscriber);
synchronized (lock) {
if (currentChannel != null) {
subscriber.complete(currentChannel);
return;
}
}
subscribers.add(subscriber);
logger.verbose("Added a subscriber {} to AMQP channel processor. Total "
+ "subscribers = {}", subscriber, subscribers.size());
if (!isRetryPending.get()) {
requestUpstream();
}
} | class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable {
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class,
"upstream");
private final ClientLogger logger;
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean isRequested = new AtomicBoolean();
private final AtomicBoolean isRetryPending = new AtomicBoolean();
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object lock = new Object();
private final AmqpRetryPolicy retryPolicy;
private final String fullyQualifiedNamespace;
private final String entityPath;
private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction;
private volatile Subscription upstream;
private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>();
private volatile Throwable lastError;
private volatile T currentChannel;
private volatile Disposable connectionSubscription;
private volatile Disposable retrySubscription;
public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) {
this.fullyQualifiedNamespace = Objects
.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction,
"'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
this.logger = Objects.requireNonNull(logger, "'logger' cannot be null.");
}
@Override
public void onSubscribe(Subscription subscription) {
if (Operators.setOnce(UPSTREAM, this, subscription)) {
isRequested.set(true);
subscription.request(1);
} else {
logger.warning("Processors can only be subscribed to once.");
}
}
@Override
public void onNext(T amqpChannel) {
logger.info("namespace[{}] entityPath[{}]: Setting next AMQP channel.", fullyQualifiedNamespace, entityPath);
Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null.");
final T oldChannel;
final Disposable oldSubscription;
synchronized (lock) {
oldChannel = currentChannel;
oldSubscription = connectionSubscription;
currentChannel = amqpChannel;
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
logger.info("namespace[{}] entityPath[{}]: Next AMQP channel received, updating {} current "
+ "subscribers", fullyQualifiedNamespace, entityPath, subscribers.size());
currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel));
connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe(
state -> {
if (state == AmqpEndpointState.ACTIVE) {
retryAttempts.set(0);
logger.info("namespace[{}] entityPath[{}]: Channel is now active.",
fullyQualifiedNamespace, entityPath);
}
},
error -> {
setAndClearChannel();
onError(error);
},
() -> {
if (isDisposed()) {
logger.info("namespace[{}] entityPath[{}]: Channel is disposed.",
fullyQualifiedNamespace, entityPath);
} else {
logger.info("namespace[{}] entityPath[{}]: Channel is closed.",
fullyQualifiedNamespace, entityPath);
setAndClearChannel();
}
});
}
close(oldChannel);
if (oldSubscription != null) {
oldSubscription.dispose();
}
isRequested.set(false);
}
@Override
public void onError(Throwable throwable) {
Objects.requireNonNull(throwable, "'throwable' is required.");
if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) {
logger.warning("Retry is already pending. Ignoring transient error.", throwable);
return;
}
int attemptsMade = retryAttempts.incrementAndGet();
if (throwable instanceof AmqpException) {
AmqpException amqpException = (AmqpException) throwable;
if (amqpException.isTransient()) {
logger.verbose("Attempted {} times to get a new AMQP connection", attemptsMade);
attemptsMade = Math.min(attemptsMade, retryPolicy.getMaxRetries());
}
}
final int attempt = attemptsMade;
final Duration retryInterval = retryPolicy.calculateRetryDelay(throwable, attempt);
if (retryInterval != null) {
if (isRetryPending.getAndSet(true)) {
retryAttempts.decrementAndGet();
return;
}
logger.warning("Retry
attempt, retryInterval.toMillis(), throwable);
retrySubscription = Mono.delay(retryInterval).subscribe(i -> {
if (isDisposed()) {
logger.info("Retry
} else {
logger.info("Retry
requestUpstream();
isRetryPending.set(false);
}
});
return;
}
logger.warning("Non-retryable error occurred in connection.", throwable);
lastError = throwable;
isDisposed.set(true);
dispose();
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
logger.info("namespace[{}] entityPath[{}]: Error in AMQP channel processor. Notifying {} "
+ "subscribers.", fullyQualifiedNamespace, entityPath, currentSubscribers.size());
currentSubscribers.forEach(subscriber -> subscriber.onError(throwable));
}
}
@Override
public void onComplete() {
logger.info("Upstream connection publisher was completed. Terminating processor.");
isDisposed.set(true);
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
logger.info("namespace[{}] entityPath[{}]: AMQP channel processor completed. Notifying {} "
+ "subscribers.", fullyQualifiedNamespace, entityPath, currentSubscribers.size());
currentSubscribers.forEach(subscriber -> subscriber.onComplete());
}
}
/**
 * Disposes of the processor: cancels any pending retry, completes waiting subscribers via
 * {@code onComplete()}, and closes the current channel. Safe to call more than once.
 */
@Override
public void dispose() {
    // Fix: the source carried a duplicated @Override annotation here, which does not
    // compile (@Override is not repeatable); keep exactly one.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    if (retrySubscription != null && !retrySubscription.isDisposed()) {
        retrySubscription.dispose();
    }
    onComplete();
    synchronized (lock) {
        setAndClearChannel();
    }
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
private void requestUpstream() {
if (currentChannel != null) {
logger.verbose("namespace[{}] entityPath[{}]: Connection exists, not requesting another.",
fullyQualifiedNamespace, entityPath);
return;
} else if (isDisposed()) {
logger.verbose("namespace[{}] entityPath[{}]: Is already disposed.", fullyQualifiedNamespace, entityPath);
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.warning("namespace[{}] entityPath[{}]: There is no upstream subscription.",
fullyQualifiedNamespace, entityPath);
return;
}
if (!isRequested.getAndSet(true)) {
logger.info("namespace[{}] entityPath[{}]: Connection not requested, yet. Requesting one.",
fullyQualifiedNamespace, entityPath);
subscription.request(1);
}
}
private void setAndClearChannel() {
T oldChannel;
synchronized (lock) {
oldChannel = currentChannel;
currentChannel = null;
}
close(oldChannel);
}
/**
 * Best-effort close of a channel: {@link AutoCloseable} channels are closed (close failures
 * are logged, not rethrown), {@code Disposable} channels are disposed, anything else is a no-op.
 *
 * @param channel The channel to close; may be {@code null} (matches neither instanceof branch).
 */
private void close(T channel) {
    if (channel instanceof AutoCloseable) {
        try {
            ((AutoCloseable) channel).close();
        } catch (Exception error) {
            // Fix: log the caught exception; previously the channel itself was passed,
            // dropping the failure cause from the log.
            logger.warning("Error occurred closing item.", error);
        }
    } else if (channel instanceof Disposable) {
        ((Disposable) channel).dispose();
    }
}
/**
* Represents a subscriber, waiting for an AMQP connection.
*/
private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
@Override
public void cancel() {
super.cancel();
processor.subscribers.remove(this);
}
@Override
public void onComplete() {
if (!isCancelled()) {
actual.onComplete();
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
super.complete(channel);
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
actual.onError(throwable);
} else {
Operators.onOperatorError(throwable, currentContext());
}
}
}
} | class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable {
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM =
AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class,
"upstream");
private final ClientLogger logger;
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean isRequested = new AtomicBoolean();
private final AtomicBoolean isRetryPending = new AtomicBoolean();
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object lock = new Object();
private final AmqpRetryPolicy retryPolicy;
private final String fullyQualifiedNamespace;
private final String entityPath;
private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction;
private volatile Subscription upstream;
private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>();
private volatile Throwable lastError;
private volatile T currentChannel;
private volatile Disposable connectionSubscription;
private volatile Disposable retrySubscription;
public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) {
this.fullyQualifiedNamespace = Objects
.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction,
"'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
this.logger = Objects.requireNonNull(logger, "'logger' cannot be null.");
}
@Override
public void onSubscribe(Subscription subscription) {
if (Operators.setOnce(UPSTREAM, this, subscription)) {
isRequested.set(true);
subscription.request(1);
} else {
logger.warning("Processors can only be subscribed to once.");
}
}
@Override
public void onNext(T amqpChannel) {
logger.info("namespace[{}] entityPath[{}]: Setting next AMQP channel.", fullyQualifiedNamespace, entityPath);
Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null.");
final T oldChannel;
final Disposable oldSubscription;
synchronized (lock) {
oldChannel = currentChannel;
oldSubscription = connectionSubscription;
currentChannel = amqpChannel;
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
logger.info("namespace[{}] entityPath[{}]: Next AMQP channel received, updating {} current "
+ "subscribers", fullyQualifiedNamespace, entityPath, subscribers.size());
currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel));
connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe(
state -> {
if (state == AmqpEndpointState.ACTIVE) {
retryAttempts.set(0);
logger.info("namespace[{}] entityPath[{}]: Channel is now active.",
fullyQualifiedNamespace, entityPath);
}
},
error -> {
setAndClearChannel();
onError(error);
},
() -> {
if (isDisposed()) {
logger.info("namespace[{}] entityPath[{}]: Channel is disposed.",
fullyQualifiedNamespace, entityPath);
} else {
logger.info("namespace[{}] entityPath[{}]: Channel is closed.",
fullyQualifiedNamespace, entityPath);
setAndClearChannel();
}
});
}
close(oldChannel);
if (oldSubscription != null) {
oldSubscription.dispose();
}
isRequested.set(false);
}
@Override
public void onError(Throwable throwable) {
Objects.requireNonNull(throwable, "'throwable' is required.");
if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) {
logger.warning("Retry is already pending. Ignoring transient error.", throwable);
return;
}
int attemptsMade = retryAttempts.incrementAndGet();
if (throwable instanceof AmqpException) {
AmqpException amqpException = (AmqpException) throwable;
if (amqpException.isTransient()) {
logger.verbose("Attempted {} times to get a new AMQP connection", attemptsMade);
attemptsMade = Math.min(attemptsMade, retryPolicy.getMaxRetries());
}
}
final int attempt = attemptsMade;
final Duration retryInterval = retryPolicy.calculateRetryDelay(throwable, attempt);
if (retryInterval != null) {
if (isRetryPending.getAndSet(true)) {
retryAttempts.decrementAndGet();
return;
}
logger.warning("Retry
attempt, retryInterval.toMillis(), throwable);
retrySubscription = Mono.delay(retryInterval).subscribe(i -> {
if (isDisposed()) {
logger.info("Retry
} else {
logger.info("Retry
requestUpstream();
isRetryPending.set(false);
}
});
return;
}
logger.warning("Non-retryable error occurred in connection.", throwable);
lastError = throwable;
isDisposed.set(true);
dispose();
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
logger.info("namespace[{}] entityPath[{}]: Error in AMQP channel processor. Notifying {} "
+ "subscribers.", fullyQualifiedNamespace, entityPath, currentSubscribers.size());
currentSubscribers.forEach(subscriber -> subscriber.onError(throwable));
}
}
@Override
public void onComplete() {
logger.info("Upstream connection publisher was completed. Terminating processor.");
isDisposed.set(true);
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
logger.info("namespace[{}] entityPath[{}]: AMQP channel processor completed. Notifying {} "
+ "subscribers.", fullyQualifiedNamespace, entityPath, currentSubscribers.size());
currentSubscribers.forEach(subscriber -> subscriber.onComplete());
}
}
/**
 * Disposes of the processor: cancels any pending retry, completes waiting subscribers via
 * {@code onComplete()}, and closes the current channel. Safe to call more than once.
 */
@Override
public void dispose() {
    // Fix: the source carried a duplicated @Override annotation here, which does not
    // compile (@Override is not repeatable); keep exactly one.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    if (retrySubscription != null && !retrySubscription.isDisposed()) {
        retrySubscription.dispose();
    }
    onComplete();
    synchronized (lock) {
        setAndClearChannel();
    }
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
private void requestUpstream() {
if (currentChannel != null) {
logger.verbose("namespace[{}] entityPath[{}]: Connection exists, not requesting another.",
fullyQualifiedNamespace, entityPath);
return;
} else if (isDisposed()) {
logger.verbose("namespace[{}] entityPath[{}]: Is already disposed.", fullyQualifiedNamespace, entityPath);
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.warning("namespace[{}] entityPath[{}]: There is no upstream subscription.",
fullyQualifiedNamespace, entityPath);
return;
}
if (!isRequested.getAndSet(true)) {
logger.info("namespace[{}] entityPath[{}]: Connection not requested, yet. Requesting one.",
fullyQualifiedNamespace, entityPath);
subscription.request(1);
}
}
private void setAndClearChannel() {
T oldChannel;
synchronized (lock) {
oldChannel = currentChannel;
currentChannel = null;
}
close(oldChannel);
}
/**
 * Best-effort close of a channel: {@link AutoCloseable} channels are closed (close failures
 * are logged, not rethrown), {@code Disposable} channels are disposed, anything else is a no-op.
 *
 * @param channel The channel to close; may be {@code null} (matches neither instanceof branch).
 */
private void close(T channel) {
    if (channel instanceof AutoCloseable) {
        try {
            ((AutoCloseable) channel).close();
        } catch (Exception error) {
            // Fix: log the caught exception; previously the channel itself was passed,
            // dropping the failure cause from the log.
            logger.warning("Error occurred closing item.", error);
        }
    } else if (channel instanceof Disposable) {
        ((Disposable) channel).dispose();
    }
}
/**
* Represents a subscriber, waiting for an AMQP connection.
*/
private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
@Override
public void cancel() {
super.cancel();
processor.subscribers.remove(this);
}
@Override
public void onComplete() {
if (!isCancelled()) {
actual.onComplete();
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
super.complete(channel);
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
actual.onError(throwable);
} else {
Operators.onOperatorError(throwable, currentContext());
}
}
}
} |
We should change this. Since this is in examples module, we should not use `*BridgeInternal` classes here. Also please check if you have not un-intentionally made this change in other public surface area (examples sub-module) | private DocumentCollection getMultiPartitionCollectionDefinition() {
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
List<String> paths = new ArrayList<>();
paths.add("/city");
partitionKeyDefinition.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
ModelBridgeInternal.setIncludedPathIndexes(includedPath, indexes);
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
collectionDefinition.setIndexingPolicy(indexingPolicy);
return collectionDefinition;
} | ModelBridgeInternal.setIncludedPathIndexes(includedPath, indexes); | private DocumentCollection getMultiPartitionCollectionDefinition() {
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
List<String> paths = new ArrayList<>();
paths.add("/city");
partitionKeyDefinition.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
collectionDefinition.setIndexingPolicy(indexingPolicy);
return collectionDefinition;
} | class CollectionCRUDAsyncAPITest extends DocumentClientTest {
private final static int TIMEOUT = 120000;
private Database createdDatabase;
private AsyncDocumentClient client;
private DocumentCollection collectionDefinition;
@BeforeClass(groups = "samples", timeOut = TIMEOUT)
public void before_CollectionCRUDAsyncAPITest() {
ConnectionPolicy connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.clientBuilder()
.withServiceEndpoint(TestConfigurations.HOST)
.withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY)
.withConnectionPolicy(connectionPolicy)
.withConsistencyLevel(ConsistencyLevel.SESSION)
.withContentResponseOnWriteEnabled(true);
this.client = this.clientBuilder().build();
createdDatabase = Utils.createDatabaseForTest(client);
}
@BeforeMethod(groups = "samples", timeOut = TIMEOUT)
public void before() {
collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDef);
}
@AfterClass(groups = "samples", timeOut = TIMEOUT)
public void shutdown() {
Utils.safeClean(client, createdDatabase);
Utils.safeClose(client);
}
/**
* CREATE a document collection using async api.
* If you want a single partition collection with 10,000 RU/s throughput,
* the only way to do so is to create a single partition collection with lower
* throughput (400) and then increase the throughput.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_SinglePartition_Async() throws Exception {
RequestOptions singlePartitionRequestOptions = new RequestOptions();
singlePartitionRequestOptions.setOfferThroughput(400);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document collection using async api.
* This test uses java8 lambda expression.
* See testCreateCollection_Async_withoutLambda for usage without lambda
* expressions.
* Set the throughput to be > 10,000 RU/s
* to create a multi partition collection.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_MultiPartition_Async() throws Exception {
RequestOptions multiPartitionRequestOptions = new RequestOptions();
multiPartitionRequestOptions.setOfferThroughput(20000);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client.createCollection(
getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document Collection using async api, without java8 lambda expressions
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_Async_withoutLambda() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
Consumer<ResourceResponse<DocumentCollection>> onCollectionCreationAction = new Consumer<ResourceResponse<DocumentCollection>>() {
@Override
public void accept(ResourceResponse<DocumentCollection> resourceResponse) {
System.out.println(resourceResponse.getActivityId());
countDownLatch.countDown();
}
};
Consumer<Throwable> onError = new Consumer<Throwable>() {
@Override
public void accept(Throwable error) {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
}
};
createCollectionObservable.single()
.subscribe(onCollectionCreationAction, onError);
countDownLatch.await();
}
/**
* CREATE a collection in a blocking manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking() {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
createCollectionObservable.single().block();
}
/**
* Attempt to create a Collection which already exists
* - First create a Collection
* - Using the async api generate an async collection creation observable
* - Converts the Observable to blocking using Observable.toBlocking() api
* - Catch already exist failure (409)
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking_CollectionAlreadyExists_Fails() {
client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block();
Mono<ResourceResponse<DocumentCollection>> collectionForTestObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
try {
collectionForTestObservable.single()
.block();
assertThat("Should not reach here", false);
} catch (CosmosException e) {
assertThat("Collection already exists.", e.getStatusCode(),
equalTo(409));
}
}
/**
* You can convert a Flux to a CompletableFuture.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void transformObservableToCompletableFuture() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
CompletableFuture<ResourceResponse<DocumentCollection>> future = createCollectionObservable.single().toFuture();
ResourceResponse<DocumentCollection> rrd = future.get();
assertThat(rrd.getRequestCharge(), greaterThan((double) 0));
System.out.println(rrd.getRequestCharge());
}
/**
* READ a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndReadCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> readCollectionObservable = client
.readCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
readCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while reading the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
 * DELETE a Collection in an Async manner.
 */
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndDeleteCollection() throws Exception {
    // Provision a collection up front so there is something to delete.
    DocumentCollection createdCollection = client
        .createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
        .getResource();

    Mono<ResourceResponse<DocumentCollection>> deleteMono =
        client.deleteCollection(getCollectionLink(createdCollection), null);

    // The latch keeps the test thread alive until the async delete completes.
    final CountDownLatch doneLatch = new CountDownLatch(1);
    deleteMono.single().subscribe(
        response -> {
            System.out.println(response.getActivityId());
            doneLatch.countDown();
        },
        error -> {
            System.err.println(
                "an error occurred while deleting the collection: actual cause: " + error.getMessage());
            doneLatch.countDown();
        });
    doneLatch.await();
}
/**
 * Query a Collection in an Async manner.
 *
 * NOTE(review): previously an assertion failing inside the onNext consumer was routed by
 * Reactor to the error consumer, which only printed it and released the latch — so the
 * test could pass even when its assertions failed, and a query error never failed the
 * test either. Failures are now captured and rethrown on the TestNG thread.
 */
@Test(groups = "samples", timeOut = TIMEOUT)
public void collectionCreateAndQuery() throws Exception {
    // Create a collection so the id-filtered query below has exactly one match.
    DocumentCollection collection = client
        .createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
        .getResource();

    Flux<FeedResponse<DocumentCollection>> queryCollectionObservable = client.queryCollections(
        getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.getId()),
        null);

    final CountDownLatch countDownLatch = new CountDownLatch(1);
    // Single-element holder: carries any failure from the subscriber thread back to the
    // test thread so it can be rethrown after the latch releases.
    final Throwable[] asyncFailure = new Throwable[1];
    queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> {
        try {
            assertThat(collectionFeedResponseList.size(), equalTo(1));
            FeedResponse<DocumentCollection> collectionFeedResponse = collectionFeedResponseList.get(0);
            assertThat(collectionFeedResponse.getResults().size(), equalTo(1));
            DocumentCollection foundCollection = collectionFeedResponse.getResults().get(0);
            assertThat(foundCollection.getId(), equalTo(collection.getId()));
            System.out.println(collectionFeedResponse.getActivityId());
        } catch (Throwable t) {
            asyncFailure[0] = t;
        } finally {
            countDownLatch.countDown();
        }
    }, error -> {
        System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage());
        asyncFailure[0] = error;
        countDownLatch.countDown();
    });
    countDownLatch.await();
    if (asyncFailure[0] != null) {
        // Surface the asynchronous failure so TestNG marks the test as failed.
        throw new AssertionError("collection query failed asynchronously", asyncFailure[0]);
    }
}
private String getDatabaseLink() {
    // Self-link of the database provisioned for this test class.
    return String.format("dbs/%s", createdDatabase.getId());
}
private String getCollectionLink(DocumentCollection collection) {
    // Collection self-link of the form dbs/{databaseId}/colls/{collectionId}.
    return String.format("dbs/%s/colls/%s", createdDatabase.getId(), collection.getId());
}
} | class CollectionCRUDAsyncAPITest extends DocumentClientTest {
// Per-test timeout in milliseconds; also used as the TestNG timeOut for setup/teardown.
private final static int TIMEOUT = 120000;
// Database shared by all tests; created once in before_CollectionCRUDAsyncAPITest.
private Database createdDatabase;
// Async client under test; built once per class and closed in shutdown().
private AsyncDocumentClient client;
// Collection definition re-created fresh before each test method.
private DocumentCollection collectionDefinition;
@BeforeClass(groups = "samples", timeOut = TIMEOUT)
public void before_CollectionCRUDAsyncAPITest() {
    // Direct-mode connection policy with default settings.
    ConnectionPolicy directPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
    this.clientBuilder()
        .withServiceEndpoint(TestConfigurations.HOST)
        .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY)
        .withConnectionPolicy(directPolicy)
        .withConsistencyLevel(ConsistencyLevel.SESSION)
        .withContentResponseOnWriteEnabled(true);
    // Build the shared client and provision the test database used by every method.
    this.client = this.clientBuilder().build();
    this.createdDatabase = Utils.createDatabaseForTest(this.client);
}
@BeforeMethod(groups = "samples", timeOut = TIMEOUT)
public void before() {
    // Each test gets a brand-new collection definition partitioned on /mypk.
    PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
    ArrayList<String> paths = new ArrayList<String>();
    paths.add("/mypk");
    partitionKeyDef.setPaths(paths);
    collectionDefinition = new DocumentCollection();
    collectionDefinition.setId(UUID.randomUUID().toString());
    collectionDefinition.setPartitionKey(partitionKeyDef);
}
@AfterClass(groups = "samples", timeOut = TIMEOUT)
public void shutdown() {
    // Delete the test database first, then release the client's resources.
    Utils.safeClean(client, createdDatabase);
    Utils.safeClose(client);
}
/**
 * CREATE a document collection using the async api.
 * A single partition collection is created here at the minimum throughput (400 RU/s);
 * to run such a collection at 10,000 RU/s you must create it at lower throughput
 * first and then scale the offer up.
 */
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_SinglePartition_Async() throws Exception {
    RequestOptions singlePartitionRequestOptions = new RequestOptions();
    singlePartitionRequestOptions.setOfferThroughput(400);

    // The latch keeps the test thread alive until the async create completes.
    final CountDownLatch completionLatch = new CountDownLatch(1);
    client.createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions)
        .single()
        .subscribe(
            response -> {
                System.out.println(response.getActivityId());
                completionLatch.countDown();
            },
            error -> {
                System.err.println(
                    "an error occurred while creating the collection: actual cause: " + error.getMessage());
                completionLatch.countDown();
            });
    completionLatch.await();
}
/**
 * CREATE a document collection using the async api and java8 lambda expressions.
 * See createCollection_Async_withoutLambda for the same flow without lambdas.
 * Requesting more than 10,000 RU/s makes the service create a multi partition
 * collection.
 */
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_MultiPartition_Async() throws Exception {
    RequestOptions multiPartitionRequestOptions = new RequestOptions();
    multiPartitionRequestOptions.setOfferThroughput(20000);

    // The latch keeps the test thread alive until the async create completes.
    final CountDownLatch completionLatch = new CountDownLatch(1);
    client.createCollection(getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions)
        .single()
        .subscribe(
            response -> {
                System.out.println(response.getActivityId());
                completionLatch.countDown();
            },
            error -> {
                System.err.println(
                    "an error occurred while creating the collection: actual cause: " + error.getMessage());
                completionLatch.countDown();
            });
    completionLatch.await();
}
/**
 * CREATE a document Collection using async api, without java8 lambda expressions.
 */
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_Async_withoutLambda() throws Exception {
    Mono<ResourceResponse<DocumentCollection>> createMono = client
        .createCollection(getDatabaseLink(), collectionDefinition, null);
    final CountDownLatch completionLatch = new CountDownLatch(1);
    // Success callback: log the activity id and release the latch.
    Consumer<ResourceResponse<DocumentCollection>> onSuccess = new Consumer<ResourceResponse<DocumentCollection>>() {
        @Override
        public void accept(ResourceResponse<DocumentCollection> resourceResponse) {
            System.out.println(resourceResponse.getActivityId());
            completionLatch.countDown();
        }
    };
    // Failure callback: report the cause and release the latch so the test never hangs.
    Consumer<Throwable> onFailure = new Consumer<Throwable>() {
        @Override
        public void accept(Throwable error) {
            System.err.println(
                "an error occurred while creating the collection: actual cause: " + error.getMessage());
            completionLatch.countDown();
        }
    };
    createMono.single()
        .subscribe(onSuccess, onFailure);
    completionLatch.await();
}
/**
 * CREATE a collection in a blocking manner.
 */
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking() {
    // block() turns the async creation into a synchronous call.
    client.createCollection(getDatabaseLink(), collectionDefinition, null)
        .single()
        .block();
}
/**
* Attempt to create a Collection which already exists
* - First create a Collection
* - Using the async api generate an async collection creation observable
* - Converts the Observable to blocking using Observable.toBlocking() api
* - Catch already exist failure (409)
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking_CollectionAlreadyExists_Fails() {
client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block();
Mono<ResourceResponse<DocumentCollection>> collectionForTestObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
try {
collectionForTestObservable.single()
.block();
assertThat("Should not reach here", false);
} catch (CosmosException e) {
assertThat("Collection already exists.", e.getStatusCode(),
equalTo(409));
}
}
/**
* You can convert a Flux to a CompletableFuture.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void transformObservableToCompletableFuture() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
CompletableFuture<ResourceResponse<DocumentCollection>> future = createCollectionObservable.single().toFuture();
ResourceResponse<DocumentCollection> rrd = future.get();
assertThat(rrd.getRequestCharge(), greaterThan((double) 0));
System.out.println(rrd.getRequestCharge());
}
/**
* READ a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndReadCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> readCollectionObservable = client
.readCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
readCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while reading the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* DELETE a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndDeleteCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> deleteCollectionObservable = client
.deleteCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
deleteCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while deleting the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* Query a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void collectionCreateAndQuery() throws Exception {
DocumentCollection collection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Flux<FeedResponse<DocumentCollection>> queryCollectionObservable = client.queryCollections(
getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.getId()),
null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> {
assertThat(collectionFeedResponseList.size(), equalTo(1));
FeedResponse<DocumentCollection> collectionFeedResponse = collectionFeedResponseList.get(0);
assertThat(collectionFeedResponse.getResults().size(), equalTo(1));
DocumentCollection foundCollection = collectionFeedResponse.getResults().get(0);
assertThat(foundCollection.getId(), equalTo(collection.getId()));
System.out.println(collectionFeedResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
private String getDatabaseLink() {
return "dbs/" + createdDatabase.getId();
}
private String getCollectionLink(DocumentCollection collection) {
return "dbs/" + createdDatabase.getId() + "/colls/" + collection.getId();
}
} |
I need to change this call because IncludedPath.setIndexes is no longer accessible from here — should it stay public? I checked the .NET SDK: both the setter and the getter are internal, so I made setIndexes package-private to match. | private DocumentCollection getMultiPartitionCollectionDefinition() {
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
List<String> paths = new ArrayList<>();
paths.add("/city");
partitionKeyDefinition.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
ModelBridgeInternal.setIncludedPathIndexes(includedPath, indexes);
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
collectionDefinition.setIndexingPolicy(indexingPolicy);
return collectionDefinition;
} | ModelBridgeInternal.setIncludedPathIndexes(includedPath, indexes); | private DocumentCollection getMultiPartitionCollectionDefinition() {
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
List<String> paths = new ArrayList<>();
paths.add("/city");
partitionKeyDefinition.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
collectionDefinition.setIndexingPolicy(indexingPolicy);
return collectionDefinition;
} | class CollectionCRUDAsyncAPITest extends DocumentClientTest {
private final static int TIMEOUT = 120000;
private Database createdDatabase;
private AsyncDocumentClient client;
private DocumentCollection collectionDefinition;
@BeforeClass(groups = "samples", timeOut = TIMEOUT)
public void before_CollectionCRUDAsyncAPITest() {
ConnectionPolicy connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.clientBuilder()
.withServiceEndpoint(TestConfigurations.HOST)
.withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY)
.withConnectionPolicy(connectionPolicy)
.withConsistencyLevel(ConsistencyLevel.SESSION)
.withContentResponseOnWriteEnabled(true);
this.client = this.clientBuilder().build();
createdDatabase = Utils.createDatabaseForTest(client);
}
@BeforeMethod(groups = "samples", timeOut = TIMEOUT)
public void before() {
collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDef);
}
@AfterClass(groups = "samples", timeOut = TIMEOUT)
public void shutdown() {
Utils.safeClean(client, createdDatabase);
Utils.safeClose(client);
}
/**
* CREATE a document collection using async api.
* If you want a single partition collection with 10,000 RU/s throughput,
* the only way to do so is to create a single partition collection with lower
* throughput (400) and then increase the throughput.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_SinglePartition_Async() throws Exception {
RequestOptions singlePartitionRequestOptions = new RequestOptions();
singlePartitionRequestOptions.setOfferThroughput(400);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document collection using async api.
* This test uses java8 lambda expression.
* See testCreateCollection_Async_withoutLambda for usage without lambda
* expressions.
* Set the throughput to be > 10,000 RU/s
* to create a multi partition collection.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_MultiPartition_Async() throws Exception {
RequestOptions multiPartitionRequestOptions = new RequestOptions();
multiPartitionRequestOptions.setOfferThroughput(20000);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client.createCollection(
getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document Collection using async api, without java8 lambda expressions
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_Async_withoutLambda() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
Consumer<ResourceResponse<DocumentCollection>> onCollectionCreationAction = new Consumer<ResourceResponse<DocumentCollection>>() {
@Override
public void accept(ResourceResponse<DocumentCollection> resourceResponse) {
System.out.println(resourceResponse.getActivityId());
countDownLatch.countDown();
}
};
Consumer<Throwable> onError = new Consumer<Throwable>() {
@Override
public void accept(Throwable error) {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
}
};
createCollectionObservable.single()
.subscribe(onCollectionCreationAction, onError);
countDownLatch.await();
}
/**
* CREATE a collection in a blocking manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking() {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
createCollectionObservable.single().block();
}
/**
* Attempt to create a Collection which already exists
* - First create a Collection
* - Using the async api generate an async collection creation observable
* - Converts the Observable to blocking using Observable.toBlocking() api
* - Catch already exist failure (409)
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking_CollectionAlreadyExists_Fails() {
client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block();
Mono<ResourceResponse<DocumentCollection>> collectionForTestObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
try {
collectionForTestObservable.single()
.block();
assertThat("Should not reach here", false);
} catch (CosmosException e) {
assertThat("Collection already exists.", e.getStatusCode(),
equalTo(409));
}
}
/**
* You can convert a Flux to a CompletableFuture.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void transformObservableToCompletableFuture() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
CompletableFuture<ResourceResponse<DocumentCollection>> future = createCollectionObservable.single().toFuture();
ResourceResponse<DocumentCollection> rrd = future.get();
assertThat(rrd.getRequestCharge(), greaterThan((double) 0));
System.out.println(rrd.getRequestCharge());
}
/**
* READ a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndReadCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> readCollectionObservable = client
.readCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
readCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while reading the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* DELETE a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndDeleteCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> deleteCollectionObservable = client
.deleteCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
deleteCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while deleting the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* Query a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void collectionCreateAndQuery() throws Exception {
DocumentCollection collection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Flux<FeedResponse<DocumentCollection>> queryCollectionObservable = client.queryCollections(
getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.getId()),
null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> {
assertThat(collectionFeedResponseList.size(), equalTo(1));
FeedResponse<DocumentCollection> collectionFeedResponse = collectionFeedResponseList.get(0);
assertThat(collectionFeedResponse.getResults().size(), equalTo(1));
DocumentCollection foundCollection = collectionFeedResponse.getResults().get(0);
assertThat(foundCollection.getId(), equalTo(collection.getId()));
System.out.println(collectionFeedResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
private String getDatabaseLink() {
return "dbs/" + createdDatabase.getId();
}
private String getCollectionLink(DocumentCollection collection) {
return "dbs/" + createdDatabase.getId() + "/colls/" + collection.getId();
}
} | class CollectionCRUDAsyncAPITest extends DocumentClientTest {
private final static int TIMEOUT = 120000;
private Database createdDatabase;
private AsyncDocumentClient client;
private DocumentCollection collectionDefinition;
@BeforeClass(groups = "samples", timeOut = TIMEOUT)
public void before_CollectionCRUDAsyncAPITest() {
ConnectionPolicy connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.clientBuilder()
.withServiceEndpoint(TestConfigurations.HOST)
.withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY)
.withConnectionPolicy(connectionPolicy)
.withConsistencyLevel(ConsistencyLevel.SESSION)
.withContentResponseOnWriteEnabled(true);
this.client = this.clientBuilder().build();
createdDatabase = Utils.createDatabaseForTest(client);
}
@BeforeMethod(groups = "samples", timeOut = TIMEOUT)
public void before() {
collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDef);
}
@AfterClass(groups = "samples", timeOut = TIMEOUT)
public void shutdown() {
Utils.safeClean(client, createdDatabase);
Utils.safeClose(client);
}
/**
* CREATE a document collection using async api.
* If you want a single partition collection with 10,000 RU/s throughput,
* the only way to do so is to create a single partition collection with lower
* throughput (400) and then increase the throughput.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_SinglePartition_Async() throws Exception {
RequestOptions singlePartitionRequestOptions = new RequestOptions();
singlePartitionRequestOptions.setOfferThroughput(400);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document collection using async api.
* This test uses java8 lambda expression.
* See testCreateCollection_Async_withoutLambda for usage without lambda
* expressions.
* Set the throughput to be > 10,000 RU/s
* to create a multi partition collection.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_MultiPartition_Async() throws Exception {
RequestOptions multiPartitionRequestOptions = new RequestOptions();
multiPartitionRequestOptions.setOfferThroughput(20000);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client.createCollection(
getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document Collection using async api, without java8 lambda expressions
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_Async_withoutLambda() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
Consumer<ResourceResponse<DocumentCollection>> onCollectionCreationAction = new Consumer<ResourceResponse<DocumentCollection>>() {
@Override
public void accept(ResourceResponse<DocumentCollection> resourceResponse) {
System.out.println(resourceResponse.getActivityId());
countDownLatch.countDown();
}
};
Consumer<Throwable> onError = new Consumer<Throwable>() {
@Override
public void accept(Throwable error) {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
}
};
createCollectionObservable.single()
.subscribe(onCollectionCreationAction, onError);
countDownLatch.await();
}
/**
* CREATE a collection in a blocking manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking() {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
createCollectionObservable.single().block();
}
/**
* Attempt to create a Collection which already exists
* - First create a Collection
* - Using the async api generate an async collection creation observable
* - Converts the Observable to blocking using Observable.toBlocking() api
* - Catch already exist failure (409)
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking_CollectionAlreadyExists_Fails() {
client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block();
Mono<ResourceResponse<DocumentCollection>> collectionForTestObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
try {
collectionForTestObservable.single()
.block();
assertThat("Should not reach here", false);
} catch (CosmosException e) {
assertThat("Collection already exists.", e.getStatusCode(),
equalTo(409));
}
}
/**
* You can convert a Flux to a CompletableFuture.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void transformObservableToCompletableFuture() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
CompletableFuture<ResourceResponse<DocumentCollection>> future = createCollectionObservable.single().toFuture();
ResourceResponse<DocumentCollection> rrd = future.get();
assertThat(rrd.getRequestCharge(), greaterThan((double) 0));
System.out.println(rrd.getRequestCharge());
}
/**
* READ a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndReadCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> readCollectionObservable = client
.readCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
readCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while reading the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* DELETE a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndDeleteCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> deleteCollectionObservable = client
.deleteCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
deleteCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while deleting the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* Query a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void collectionCreateAndQuery() throws Exception {
DocumentCollection collection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Flux<FeedResponse<DocumentCollection>> queryCollectionObservable = client.queryCollections(
getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.getId()),
null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> {
assertThat(collectionFeedResponseList.size(), equalTo(1));
FeedResponse<DocumentCollection> collectionFeedResponse = collectionFeedResponseList.get(0);
assertThat(collectionFeedResponse.getResults().size(), equalTo(1));
DocumentCollection foundCollection = collectionFeedResponse.getResults().get(0);
assertThat(foundCollection.getId(), equalTo(collection.getId()));
System.out.println(collectionFeedResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
private String getDatabaseLink() {
return "dbs/" + createdDatabase.getId();
}
private String getCollectionLink(DocumentCollection collection) {
return "dbs/" + createdDatabase.getId() + "/colls/" + collection.getId();
}
} |
chatted offline, make sense that do not call this new method here, since it is not accessible publicly. Will remove. | private DocumentCollection getMultiPartitionCollectionDefinition() {
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
List<String> paths = new ArrayList<>();
paths.add("/city");
partitionKeyDefinition.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
ModelBridgeInternal.setIncludedPathIndexes(includedPath, indexes);
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
collectionDefinition.setIndexingPolicy(indexingPolicy);
return collectionDefinition;
} | ModelBridgeInternal.setIncludedPathIndexes(includedPath, indexes); | private DocumentCollection getMultiPartitionCollectionDefinition() {
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
List<String> paths = new ArrayList<>();
paths.add("/city");
partitionKeyDefinition.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
collectionDefinition.setIndexingPolicy(indexingPolicy);
return collectionDefinition;
} | class CollectionCRUDAsyncAPITest extends DocumentClientTest {
private final static int TIMEOUT = 120000;
private Database createdDatabase;
private AsyncDocumentClient client;
private DocumentCollection collectionDefinition;
@BeforeClass(groups = "samples", timeOut = TIMEOUT)
public void before_CollectionCRUDAsyncAPITest() {
ConnectionPolicy connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.clientBuilder()
.withServiceEndpoint(TestConfigurations.HOST)
.withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY)
.withConnectionPolicy(connectionPolicy)
.withConsistencyLevel(ConsistencyLevel.SESSION)
.withContentResponseOnWriteEnabled(true);
this.client = this.clientBuilder().build();
createdDatabase = Utils.createDatabaseForTest(client);
}
@BeforeMethod(groups = "samples", timeOut = TIMEOUT)
public void before() {
collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDef);
}
@AfterClass(groups = "samples", timeOut = TIMEOUT)
public void shutdown() {
Utils.safeClean(client, createdDatabase);
Utils.safeClose(client);
}
/**
* CREATE a document collection using async api.
* If you want a single partition collection with 10,000 RU/s throughput,
* the only way to do so is to create a single partition collection with lower
* throughput (400) and then increase the throughput.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_SinglePartition_Async() throws Exception {
RequestOptions singlePartitionRequestOptions = new RequestOptions();
singlePartitionRequestOptions.setOfferThroughput(400);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document collection using async api.
* This test uses java8 lambda expression.
* See testCreateCollection_Async_withoutLambda for usage without lambda
* expressions.
* Set the throughput to be > 10,000 RU/s
* to create a multi partition collection.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_MultiPartition_Async() throws Exception {
RequestOptions multiPartitionRequestOptions = new RequestOptions();
multiPartitionRequestOptions.setOfferThroughput(20000);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client.createCollection(
getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document Collection using async api, without java8 lambda expressions
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_Async_withoutLambda() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
Consumer<ResourceResponse<DocumentCollection>> onCollectionCreationAction = new Consumer<ResourceResponse<DocumentCollection>>() {
@Override
public void accept(ResourceResponse<DocumentCollection> resourceResponse) {
System.out.println(resourceResponse.getActivityId());
countDownLatch.countDown();
}
};
Consumer<Throwable> onError = new Consumer<Throwable>() {
@Override
public void accept(Throwable error) {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
}
};
createCollectionObservable.single()
.subscribe(onCollectionCreationAction, onError);
countDownLatch.await();
}
/**
* CREATE a collection in a blocking manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking() {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
createCollectionObservable.single().block();
}
/**
* Attempt to create a Collection which already exists
* - First create a Collection
* - Using the async api generate an async collection creation observable
* - Converts the Observable to blocking using Observable.toBlocking() api
* - Catch already exist failure (409)
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking_CollectionAlreadyExists_Fails() {
client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block();
Mono<ResourceResponse<DocumentCollection>> collectionForTestObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
try {
collectionForTestObservable.single()
.block();
assertThat("Should not reach here", false);
} catch (CosmosException e) {
assertThat("Collection already exists.", e.getStatusCode(),
equalTo(409));
}
}
/**
* You can convert a Flux to a CompletableFuture.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void transformObservableToCompletableFuture() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
CompletableFuture<ResourceResponse<DocumentCollection>> future = createCollectionObservable.single().toFuture();
ResourceResponse<DocumentCollection> rrd = future.get();
assertThat(rrd.getRequestCharge(), greaterThan((double) 0));
System.out.println(rrd.getRequestCharge());
}
/**
* READ a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndReadCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> readCollectionObservable = client
.readCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
readCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while reading the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* DELETE a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndDeleteCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> deleteCollectionObservable = client
.deleteCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
deleteCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while deleting the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* Query a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void collectionCreateAndQuery() throws Exception {
DocumentCollection collection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Flux<FeedResponse<DocumentCollection>> queryCollectionObservable = client.queryCollections(
getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.getId()),
null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> {
assertThat(collectionFeedResponseList.size(), equalTo(1));
FeedResponse<DocumentCollection> collectionFeedResponse = collectionFeedResponseList.get(0);
assertThat(collectionFeedResponse.getResults().size(), equalTo(1));
DocumentCollection foundCollection = collectionFeedResponse.getResults().get(0);
assertThat(foundCollection.getId(), equalTo(collection.getId()));
System.out.println(collectionFeedResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
private String getDatabaseLink() {
return "dbs/" + createdDatabase.getId();
}
private String getCollectionLink(DocumentCollection collection) {
return "dbs/" + createdDatabase.getId() + "/colls/" + collection.getId();
}
} | class CollectionCRUDAsyncAPITest extends DocumentClientTest {
private final static int TIMEOUT = 120000;
private Database createdDatabase;
private AsyncDocumentClient client;
private DocumentCollection collectionDefinition;
@BeforeClass(groups = "samples", timeOut = TIMEOUT)
public void before_CollectionCRUDAsyncAPITest() {
ConnectionPolicy connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.clientBuilder()
.withServiceEndpoint(TestConfigurations.HOST)
.withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY)
.withConnectionPolicy(connectionPolicy)
.withConsistencyLevel(ConsistencyLevel.SESSION)
.withContentResponseOnWriteEnabled(true);
this.client = this.clientBuilder().build();
createdDatabase = Utils.createDatabaseForTest(client);
}
@BeforeMethod(groups = "samples", timeOut = TIMEOUT)
public void before() {
collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDef);
}
@AfterClass(groups = "samples", timeOut = TIMEOUT)
public void shutdown() {
Utils.safeClean(client, createdDatabase);
Utils.safeClose(client);
}
/**
* CREATE a document collection using async api.
* If you want a single partition collection with 10,000 RU/s throughput,
* the only way to do so is to create a single partition collection with lower
* throughput (400) and then increase the throughput.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_SinglePartition_Async() throws Exception {
RequestOptions singlePartitionRequestOptions = new RequestOptions();
singlePartitionRequestOptions.setOfferThroughput(400);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document collection using async api.
* This test uses java8 lambda expression.
* See testCreateCollection_Async_withoutLambda for usage without lambda
* expressions.
* Set the throughput to be > 10,000 RU/s
* to create a multi partition collection.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_MultiPartition_Async() throws Exception {
RequestOptions multiPartitionRequestOptions = new RequestOptions();
multiPartitionRequestOptions.setOfferThroughput(20000);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client.createCollection(
getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document Collection using async api, without java8 lambda expressions
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_Async_withoutLambda() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
Consumer<ResourceResponse<DocumentCollection>> onCollectionCreationAction = new Consumer<ResourceResponse<DocumentCollection>>() {
@Override
public void accept(ResourceResponse<DocumentCollection> resourceResponse) {
System.out.println(resourceResponse.getActivityId());
countDownLatch.countDown();
}
};
Consumer<Throwable> onError = new Consumer<Throwable>() {
@Override
public void accept(Throwable error) {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
}
};
createCollectionObservable.single()
.subscribe(onCollectionCreationAction, onError);
countDownLatch.await();
}
/**
* CREATE a collection in a blocking manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking() {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
createCollectionObservable.single().block();
}
/**
* Attempt to create a Collection which already exists
* - First create a Collection
* - Using the async api generate an async collection creation observable
* - Converts the Observable to blocking using Observable.toBlocking() api
* - Catch already exist failure (409)
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking_CollectionAlreadyExists_Fails() {
client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block();
Mono<ResourceResponse<DocumentCollection>> collectionForTestObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
try {
collectionForTestObservable.single()
.block();
assertThat("Should not reach here", false);
} catch (CosmosException e) {
assertThat("Collection already exists.", e.getStatusCode(),
equalTo(409));
}
}
/**
* You can convert a Flux to a CompletableFuture.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void transformObservableToCompletableFuture() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
CompletableFuture<ResourceResponse<DocumentCollection>> future = createCollectionObservable.single().toFuture();
ResourceResponse<DocumentCollection> rrd = future.get();
assertThat(rrd.getRequestCharge(), greaterThan((double) 0));
System.out.println(rrd.getRequestCharge());
}
/**
* READ a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndReadCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> readCollectionObservable = client
.readCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
readCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while reading the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* DELETE a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndDeleteCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> deleteCollectionObservable = client
.deleteCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
deleteCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while deleting the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* Query a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void collectionCreateAndQuery() throws Exception {
DocumentCollection collection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Flux<FeedResponse<DocumentCollection>> queryCollectionObservable = client.queryCollections(
getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.getId()),
null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> {
assertThat(collectionFeedResponseList.size(), equalTo(1));
FeedResponse<DocumentCollection> collectionFeedResponse = collectionFeedResponseList.get(0);
assertThat(collectionFeedResponse.getResults().size(), equalTo(1));
DocumentCollection foundCollection = collectionFeedResponse.getResults().get(0);
assertThat(foundCollection.getId(), equalTo(collection.getId()));
System.out.println(collectionFeedResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
private String getDatabaseLink() {
return "dbs/" + createdDatabase.getId();
}
private String getCollectionLink(DocumentCollection collection) {
return "dbs/" + createdDatabase.getId() + "/colls/" + collection.getId();
}
} |
Removed from all tests. | private DocumentCollection getMultiPartitionCollectionDefinition() {
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
List<String> paths = new ArrayList<>();
paths.add("/city");
partitionKeyDefinition.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
ModelBridgeInternal.setIncludedPathIndexes(includedPath, indexes);
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
collectionDefinition.setIndexingPolicy(indexingPolicy);
return collectionDefinition;
} | ModelBridgeInternal.setIncludedPathIndexes(includedPath, indexes); | private DocumentCollection getMultiPartitionCollectionDefinition() {
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
List<String> paths = new ArrayList<>();
paths.add("/city");
partitionKeyDefinition.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDefinition);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
collectionDefinition.setIndexingPolicy(indexingPolicy);
return collectionDefinition;
} | class CollectionCRUDAsyncAPITest extends DocumentClientTest {
private final static int TIMEOUT = 120000;
private Database createdDatabase;
private AsyncDocumentClient client;
private DocumentCollection collectionDefinition;
@BeforeClass(groups = "samples", timeOut = TIMEOUT)
public void before_CollectionCRUDAsyncAPITest() {
ConnectionPolicy connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.clientBuilder()
.withServiceEndpoint(TestConfigurations.HOST)
.withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY)
.withConnectionPolicy(connectionPolicy)
.withConsistencyLevel(ConsistencyLevel.SESSION)
.withContentResponseOnWriteEnabled(true);
this.client = this.clientBuilder().build();
createdDatabase = Utils.createDatabaseForTest(client);
}
@BeforeMethod(groups = "samples", timeOut = TIMEOUT)
public void before() {
collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDef);
}
@AfterClass(groups = "samples", timeOut = TIMEOUT)
public void shutdown() {
Utils.safeClean(client, createdDatabase);
Utils.safeClose(client);
}
/**
* CREATE a document collection using async api.
* If you want a single partition collection with 10,000 RU/s throughput,
* the only way to do so is to create a single partition collection with lower
* throughput (400) and then increase the throughput.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_SinglePartition_Async() throws Exception {
RequestOptions singlePartitionRequestOptions = new RequestOptions();
singlePartitionRequestOptions.setOfferThroughput(400);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document collection using async api.
* This test uses java8 lambda expression.
* See testCreateCollection_Async_withoutLambda for usage without lambda
* expressions.
* Set the throughput to be > 10,000 RU/s
* to create a multi partition collection.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_MultiPartition_Async() throws Exception {
RequestOptions multiPartitionRequestOptions = new RequestOptions();
multiPartitionRequestOptions.setOfferThroughput(20000);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client.createCollection(
getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document Collection using async api, without java8 lambda expressions
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_Async_withoutLambda() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
Consumer<ResourceResponse<DocumentCollection>> onCollectionCreationAction = new Consumer<ResourceResponse<DocumentCollection>>() {
@Override
public void accept(ResourceResponse<DocumentCollection> resourceResponse) {
System.out.println(resourceResponse.getActivityId());
countDownLatch.countDown();
}
};
Consumer<Throwable> onError = new Consumer<Throwable>() {
@Override
public void accept(Throwable error) {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
}
};
createCollectionObservable.single()
.subscribe(onCollectionCreationAction, onError);
countDownLatch.await();
}
/**
* CREATE a collection in a blocking manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking() {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
createCollectionObservable.single().block();
}
/**
* Attempt to create a Collection which already exists
* - First create a Collection
* - Using the async api generate an async collection creation observable
* - Converts the Observable to blocking using Observable.toBlocking() api
* - Catch already exist failure (409)
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking_CollectionAlreadyExists_Fails() {
client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block();
Mono<ResourceResponse<DocumentCollection>> collectionForTestObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
try {
collectionForTestObservable.single()
.block();
assertThat("Should not reach here", false);
} catch (CosmosException e) {
assertThat("Collection already exists.", e.getStatusCode(),
equalTo(409));
}
}
/**
* You can convert a Flux to a CompletableFuture.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void transformObservableToCompletableFuture() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
CompletableFuture<ResourceResponse<DocumentCollection>> future = createCollectionObservable.single().toFuture();
ResourceResponse<DocumentCollection> rrd = future.get();
assertThat(rrd.getRequestCharge(), greaterThan((double) 0));
System.out.println(rrd.getRequestCharge());
}
/**
* READ a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndReadCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> readCollectionObservable = client
.readCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
readCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while reading the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* DELETE a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndDeleteCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> deleteCollectionObservable = client
.deleteCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
deleteCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while deleting the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* Query a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void collectionCreateAndQuery() throws Exception {
DocumentCollection collection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Flux<FeedResponse<DocumentCollection>> queryCollectionObservable = client.queryCollections(
getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.getId()),
null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> {
assertThat(collectionFeedResponseList.size(), equalTo(1));
FeedResponse<DocumentCollection> collectionFeedResponse = collectionFeedResponseList.get(0);
assertThat(collectionFeedResponse.getResults().size(), equalTo(1));
DocumentCollection foundCollection = collectionFeedResponse.getResults().get(0);
assertThat(foundCollection.getId(), equalTo(collection.getId()));
System.out.println(collectionFeedResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
private String getDatabaseLink() {
return "dbs/" + createdDatabase.getId();
}
private String getCollectionLink(DocumentCollection collection) {
return "dbs/" + createdDatabase.getId() + "/colls/" + collection.getId();
}
} | class CollectionCRUDAsyncAPITest extends DocumentClientTest {
private final static int TIMEOUT = 120000;
private Database createdDatabase;
private AsyncDocumentClient client;
private DocumentCollection collectionDefinition;
@BeforeClass(groups = "samples", timeOut = TIMEOUT)
public void before_CollectionCRUDAsyncAPITest() {
ConnectionPolicy connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.clientBuilder()
.withServiceEndpoint(TestConfigurations.HOST)
.withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY)
.withConnectionPolicy(connectionPolicy)
.withConsistencyLevel(ConsistencyLevel.SESSION)
.withContentResponseOnWriteEnabled(true);
this.client = this.clientBuilder().build();
createdDatabase = Utils.createDatabaseForTest(client);
}
@BeforeMethod(groups = "samples", timeOut = TIMEOUT)
public void before() {
collectionDefinition = new DocumentCollection();
collectionDefinition.setId(UUID.randomUUID().toString());
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
collectionDefinition.setPartitionKey(partitionKeyDef);
}
@AfterClass(groups = "samples", timeOut = TIMEOUT)
public void shutdown() {
Utils.safeClean(client, createdDatabase);
Utils.safeClose(client);
}
/**
* CREATE a document collection using async api.
* If you want a single partition collection with 10,000 RU/s throughput,
* the only way to do so is to create a single partition collection with lower
* throughput (400) and then increase the throughput.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_SinglePartition_Async() throws Exception {
RequestOptions singlePartitionRequestOptions = new RequestOptions();
singlePartitionRequestOptions.setOfferThroughput(400);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document collection using async api.
* This test uses java8 lambda expression.
* See testCreateCollection_Async_withoutLambda for usage without lambda
* expressions.
* Set the throughput to be > 10,000 RU/s
* to create a multi partition collection.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_MultiPartition_Async() throws Exception {
RequestOptions multiPartitionRequestOptions = new RequestOptions();
multiPartitionRequestOptions.setOfferThroughput(20000);
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client.createCollection(
getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions);
final CountDownLatch countDownLatch = new CountDownLatch(1);
createCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* CREATE a document Collection using async api, without java8 lambda expressions
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_Async_withoutLambda() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
Consumer<ResourceResponse<DocumentCollection>> onCollectionCreationAction = new Consumer<ResourceResponse<DocumentCollection>>() {
@Override
public void accept(ResourceResponse<DocumentCollection> resourceResponse) {
System.out.println(resourceResponse.getActivityId());
countDownLatch.countDown();
}
};
Consumer<Throwable> onError = new Consumer<Throwable>() {
@Override
public void accept(Throwable error) {
System.err.println(
"an error occurred while creating the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
}
};
createCollectionObservable.single()
.subscribe(onCollectionCreationAction, onError);
countDownLatch.await();
}
/**
* CREATE a collection in a blocking manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking() {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
createCollectionObservable.single().block();
}
/**
* Attempt to create a Collection which already exists
* - First create a Collection
* - Using the async api generate an async collection creation observable
* - Converts the Observable to blocking using Observable.toBlocking() api
* - Catch already exist failure (409)
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createCollection_toBlocking_CollectionAlreadyExists_Fails() {
client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block();
Mono<ResourceResponse<DocumentCollection>> collectionForTestObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
try {
collectionForTestObservable.single()
.block();
assertThat("Should not reach here", false);
} catch (CosmosException e) {
assertThat("Collection already exists.", e.getStatusCode(),
equalTo(409));
}
}
/**
* You can convert a Flux to a CompletableFuture.
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void transformObservableToCompletableFuture() throws Exception {
Mono<ResourceResponse<DocumentCollection>> createCollectionObservable = client
.createCollection(getDatabaseLink(), collectionDefinition, null);
CompletableFuture<ResourceResponse<DocumentCollection>> future = createCollectionObservable.single().toFuture();
ResourceResponse<DocumentCollection> rrd = future.get();
assertThat(rrd.getRequestCharge(), greaterThan((double) 0));
System.out.println(rrd.getRequestCharge());
}
/**
* READ a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndReadCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> readCollectionObservable = client
.readCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
readCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while reading the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* DELETE a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void createAndDeleteCollection() throws Exception {
DocumentCollection documentCollection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Mono<ResourceResponse<DocumentCollection>> deleteCollectionObservable = client
.deleteCollection(getCollectionLink(documentCollection), null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
deleteCollectionObservable.single()
.subscribe(collectionResourceResponse -> {
System.out.println(collectionResourceResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println(
"an error occurred while deleting the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
/**
* Query a Collection in an Async manner
*/
@Test(groups = "samples", timeOut = TIMEOUT)
public void collectionCreateAndQuery() throws Exception {
DocumentCollection collection = client
.createCollection(getDatabaseLink(), collectionDefinition, null).single().block()
.getResource();
Flux<FeedResponse<DocumentCollection>> queryCollectionObservable = client.queryCollections(
getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.getId()),
null);
final CountDownLatch countDownLatch = new CountDownLatch(1);
queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> {
assertThat(collectionFeedResponseList.size(), equalTo(1));
FeedResponse<DocumentCollection> collectionFeedResponse = collectionFeedResponseList.get(0);
assertThat(collectionFeedResponse.getResults().size(), equalTo(1));
DocumentCollection foundCollection = collectionFeedResponse.getResults().get(0);
assertThat(foundCollection.getId(), equalTo(collection.getId()));
System.out.println(collectionFeedResponse.getActivityId());
countDownLatch.countDown();
}, error -> {
System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage());
countDownLatch.countDown();
});
countDownLatch.await();
}
private String getDatabaseLink() {
return "dbs/" + createdDatabase.getId();
}
private String getCollectionLink(DocumentCollection collection) {
return "dbs/" + createdDatabase.getId() + "/colls/" + collection.getId();
}
} |
should I throw exception here? | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Proxy type is not supported " + proxy.getType());
}
this.proxy = proxy;
return this;
} | if (proxy.getType() != ProxyOptions.Type.HTTP) { | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
'}';
}
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
}
} |
should I also add the proxy type? | public String toString() {
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
'}';
} | ", proxyType=" + proxy.getType() + | public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Proxy type is not supported " + proxy.getType());
}
this.proxy = proxy;
return this;
}
@Override
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
}
@Override
} |
Yes, throw `IllegalArgumentException` - with message - only Http Proxy type is supported. | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Proxy type is not supported " + proxy.getType());
}
this.proxy = proxy;
return this;
} | if (proxy.getType() != ProxyOptions.Type.HTTP) { | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
'}';
}
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
}
} |
to the `toString()` - yes. | public String toString() {
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
'}';
} | ", proxyType=" + proxy.getType() + | public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Proxy type is not supported " + proxy.getType());
}
this.proxy = proxy;
return this;
}
@Override
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
}
@Override
} |
Thanks, changed the message to be "Only http proxy type is supported." | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Proxy type is not supported " + proxy.getType());
}
this.proxy = proxy;
return this;
} | if (proxy.getType() != ProxyOptions.Type.HTTP) { | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
'}';
}
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
}
} |
just found out that the spotbug checks all the properties should be referenced in the toString(), so need to exclude the proxy check for pattern UWF_FIELD_NOT_INITIALIZED_IN_CONSTRUCTOR | public String toString() {
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
'}';
} | ", proxyType=" + proxy.getType() + | public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Proxy type is not supported " + proxy.getType());
}
this.proxy = proxy;
return this;
}
@Override
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
}
@Override
} |
I am wondering why this wasn't a problem with `InetSocketAddress` ? It was also present in `toString()` and was not initialized in Constructor. | public String toString() {
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
'}';
} | ", proxyType=" + proxy.getType() + | public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Proxy type is not supported " + proxy.getType());
}
this.proxy = proxy;
return this;
}
@Override
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
}
@Override
} |
oo, I see, I think the UWF_FIELD_NOT_INITIALIZED_IN_CONSTRUCTOR was really checking is that I used proxy.getType, proxy.getAddress, but it might never be inistialized, so may get null pointer exception. Updated to check proxy is null or not. | public String toString() {
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxy.getType() +
", inetSocketProxyAddress=" + proxy.getAddress() +
'}';
} | ", proxyType=" + proxy.getType() + | public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Proxy type is not supported " + proxy.getType());
}
this.proxy = proxy;
return this;
}
@Override
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
}
@Override
} |
Alternatively we can wrap the original `Flux` inside a `Flux.defer` and have ```java final long[] currentTotalLength = new long[1]; ``` inside the defer which is some what more idiomatic I think. | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
})
.doFirst(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
currentTotalLength[0] = 0;
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
} | }) | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
data.mark(Integer.MAX_VALUE);
return Flux.defer(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
final long[] currentTotalLength = new long[1];
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
} else if (currentTotalLength[0] > length) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
String.format("Read more data than was requested. Size of data read: %d. Size of data"
+ " requested: %d", currentTotalLength[0], length)));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
});
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} |
as we don't support password with proxy, shouldn't we throw if password is set? | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | throw new IllegalArgumentException("Only http proxy type is supported."); | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
}
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
}
} |
I am not sure. may be we should, may be we should not, it will just get ignored. But we definitely should add more documentation to setProxy() API that we only support HTTP proxy as of now - without any username and password. I see, `setProxy()` docs are incorrect and should be updated. @xinlian12 - please create another PR for this - with updated docs - "that we only support HTTP proxy type with just the routing address. Username and password are not supported" | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | throw new IllegalArgumentException("Only http proxy type is supported."); | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
* This will create the InetSocketAddress for proxy server,
* all the requests to cosmoDB will route from this address.
*
* @param proxy The proxy server.
* @return the {@link GatewayConnectionConfig}.
*/
@Override
public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
}
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
 * Gets the value of the connection pool size the client is using.
 *
 * @return connection pool size.
 */
public int getMaxConnectionPoolSize() {
    return this.maxConnectionPoolSize;
}

/**
 * Sets the value of the connection pool size. The default is 1000.
 *
 * @param maxConnectionPoolSize The value of the connection pool size; must be positive.
 * @return the {@link GatewayConnectionConfig}.
 * @throws IllegalArgumentException if {@code maxConnectionPoolSize} is zero or negative.
 */
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
    // A zero or negative pool size would silently break connectivity at runtime; fail fast instead.
    if (maxConnectionPoolSize <= 0) {
        throw new IllegalArgumentException("'maxConnectionPoolSize' must be a positive value.");
    }
    this.maxConnectionPoolSize = maxConnectionPoolSize;
    return this;
}
/**
 * Returns how long a connection may sit idle before it is automatically closed.
 * The default is 60 seconds.
 *
 * @return Idle connection timeout duration.
 */
public Duration getIdleConnectionTimeout() {
    return idleConnectionTimeout;
}

/**
 * Sets the idle-connection timeout. After that time the connection is
 * automatically closed.
 *
 * @param idleConnectionTimeout the duration for an idle connection.
 * @return this {@link GatewayConnectionConfig} for chaining.
 */
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
    this.idleConnectionTimeout = idleConnectionTimeout;
    return this;
}
/**
 * Returns the proxy options (carrying the InetSocketAddress of the proxy server),
 * or {@code null} when no proxy has been configured.
 *
 * @return the proxy options.
 */
public ProxyOptions getProxy() {
    return proxy;
}
/**
 * Returns a string representation of this {@code GatewayConnectionConfig} for
 * logging and diagnostics, including the request timeout, pool size, idle
 * timeout and (when configured) the proxy type and address.
 *
 * @return a string describing the current configuration values.
 */
@Override
public String toString() {
    // Guard against an unset proxy; getType()/getAddress() are only dereferenced when proxy != null.
    String proxyType = proxy != null ? proxy.getType().toString() : null;
    String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
    return "GatewayConnectionConfig{"
        + "requestTimeout=" + requestTimeout
        + ", maxConnectionPoolSize=" + maxConnectionPoolSize
        + ", idleConnectionTimeout=" + idleConnectionTimeout
        + ", proxyType=" + proxyType
        + ", inetSocketProxyAddress=" + proxyAddress
        + '}';
}
} |
Thanks~ I have created another PR for the docs update: https://github.com/Azure/azure-sdk-for-java/pull/11672 | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | throw new IllegalArgumentException("Only http proxy type is supported."); | public GatewayConnectionConfig setProxy(ProxyOptions proxy) {
if (proxy.getType() != ProxyOptions.Type.HTTP) {
throw new IllegalArgumentException("Only http proxy type is supported.");
}
this.proxy = proxy;
return this;
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
 * Returns a string representation of this {@code GatewayConnectionConfig},
 * including the request timeout, pool size, idle timeout and proxy details.
 *
 * @return a string describing the current configuration.
 */
@Override
public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
}
} | class GatewayConnectionConfig {
private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(60);
private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final int DEFAULT_MAX_POOL_SIZE = 1000;
private Duration requestTimeout;
private int maxConnectionPoolSize;
private Duration idleConnectionTimeout;
private ProxyOptions proxy;
/**
* Constructor.
*/
public GatewayConnectionConfig() {
this.idleConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT;
this.maxConnectionPoolSize = DEFAULT_MAX_POOL_SIZE;
this.requestTimeout = DEFAULT_REQUEST_TIMEOUT;
}
/**
* Gets the default Gateway connection configuration.
*
* @return the default gateway connection configuration.
*/
public static GatewayConnectionConfig getDefaultConfig() {
return new GatewayConnectionConfig();
}
/**
* Gets the request timeout (time to wait for response from network peer).
*
* @return the request timeout duration.
*/
public Duration getRequestTimeout() {
return this.requestTimeout;
}
/**
* Sets the request timeout (time to wait for response from network peer).
* The default is 60 seconds.
*
* @param requestTimeout the request timeout duration.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setRequestTimeout(Duration requestTimeout) {
this.requestTimeout = requestTimeout;
return this;
}
/**
* Gets the value of the connection pool size the client is using.
*
* @return connection pool size.
*/
public int getMaxConnectionPoolSize() {
return this.maxConnectionPoolSize;
}
/**
* Sets the value of the connection pool size, the default
* is 1000.
*
* @param maxConnectionPoolSize The value of the connection pool size.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setMaxConnectionPoolSize(int maxConnectionPoolSize) {
this.maxConnectionPoolSize = maxConnectionPoolSize;
return this;
}
/**
* Gets the value of the timeout for an idle connection, the default is 60
* seconds.
*
* @return Idle connection timeout duration.
*/
public Duration getIdleConnectionTimeout() {
return this.idleConnectionTimeout;
}
/**
* sets the value of the timeout for an idle connection. After that time,
* the connection will be automatically closed.
*
* @param idleConnectionTimeout the duration for an idle connection.
* @return the {@link GatewayConnectionConfig}.
*/
public GatewayConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return this;
}
/**
* Gets the proxy options which contain the InetSocketAddress of proxy server.
*
* @return the proxy options.
*/
public ProxyOptions getProxy() {
return this.proxy;
}
/**
 * Returns a string representation of this {@code GatewayConnectionConfig},
 * including the request timeout, pool size, idle timeout and proxy details.
 *
 * @return a string describing the current configuration.
 */
@Override
public String toString() {
String proxyType = proxy != null ? proxy.getType().toString() : null;
String proxyAddress = proxy != null ? proxy.getAddress().toString() : null;
return "GatewayConnectionConfig{" +
"requestTimeout=" + requestTimeout +
", maxConnectionPoolSize=" + maxConnectionPoolSize +
", idleConnectionTimeout=" + idleConnectionTimeout +
", proxyType=" + proxyType +
", inetSocketProxyAddress=" + proxyAddress +
'}';
}
} |
Should this be a `HttpResponseException` or just `AzureException`? `HttpResponseException` is usually an error returned by the HTTP call with a status code not equal to 2xx. From [HttpResponseException JavaDoc](https://azuresdkartifacts.blob.core.windows.net/azure-sdk-for-java/staging/apidocs/com/azure/core/exception/HttpResponseException.html): >The exception thrown when an unsuccessful response is received with http status code (e.g. 3XX, 4XX, 5XX) from the service request. | private void throwIfModelStatusInvalid(Model customModel) {
if (ModelStatus.INVALID.equals(customModel.getModelInfo().getStatus())) {
List<ErrorInformation> errorInformationList = customModel.getTrainResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException(
String.format("Invalid model created with ID: %s", customModel.getModelInfo().getModelId()),
null, errorInformationList));
}
}
} | throw logger.logExceptionAsError(new HttpResponseException( | private void throwIfModelStatusInvalid(Model customModel) {
if (ModelStatus.INVALID.equals(customModel.getModelInfo().getStatus())) {
List<ErrorInformation> errorInformationList = customModel.getTrainResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException(
String.format("Invalid model created with ID: %s", customModel.getModelInfo().getModelId()),
null, errorInformationList));
}
}
} | class FormTrainingAsyncClient {
// Logger scoped to this client; used for error reporting throughout the class.
private final ClientLogger logger = new ClientLogger(FormTrainingAsyncClient.class);
// Auto-generated proxy service that performs the actual REST calls.
private final FormRecognizerClientImpl service;
// Service API version this client targets.
private final FormRecognizerServiceVersion serviceVersion;
/**
 * Creates a {@link FormTrainingAsyncClient} that sends requests to the Form Recognizer service's
 * endpoint. Each service call goes through the pipeline configured by the
 * {@link FormTrainingClientBuilder}.
 *
 * @param service The proxy service used to perform REST calls.
 * @param serviceVersion The versions of Azure Form Recognizer supported by this client library.
 */
FormTrainingAsyncClient(FormRecognizerClientImpl service, FormRecognizerServiceVersion serviceVersion) {
    this.service = service;
    this.serviceVersion = serviceVersion;
}
/**
 * Creates a new {@link FormRecognizerAsyncClient} that reuses this client's endpoint
 * and request policy pipeline.
 *
 * @return A new {@link FormRecognizerAsyncClient} object.
 */
public FormRecognizerAsyncClient getFormRecognizerAsyncClient() {
    FormRecognizerClientBuilder recognizerBuilder = new FormRecognizerClientBuilder()
        .endpoint(getEndpoint())
        .pipeline(getHttpPipeline());
    return recognizerBuilder.buildAsyncClient();
}
/**
 * Returns the HTTP pipeline this client sends requests through.
 *
 * @return the pipeline the client is using.
 */
HttpPipeline getHttpPipeline() {
    return this.service.getHttpPipeline();
}

/**
 * Returns the service endpoint this client targets.
 *
 * @return the endpoint the client is using.
 */
String getEndpoint() {
    return this.service.getEndpoint();
}
/**
* Create and train a custom model.
* Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
* Other type of content is ignored.
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl source URL parameter that is either an externally accessible Azure
* storage blob container Uri (preferably a Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
*
* @return A {@link PollerFlux} that polls the training model operation until it has completed, has failed, or has
* been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
boolean useTrainingLabels) {
return beginTraining(trainingFilesUrl, useTrainingLabels, null, null);
}
/**
 * Create and train a custom model.
 * <p>Models are trained using documents of content type 'application/pdf', 'image/jpeg',
 * 'image/png' or 'image/tiff'; other content types are ignored.</p>
 * <p>The service does not support cancellation of the long running operation and returns with an
 * error message indicating absence of cancellation support.</p>
 *
 * @param trainingFilesUrl an externally accessible Azure storage blob container Uri (preferably a
 * Shared Access Signature Uri).
 * @param useTrainingLabels boolean to specify the use of labeled files for training the model.
 * @param trainingFileFilter Filter to apply to the documents in the source path for training.
 * @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
 * 5 seconds is used.
 *
 * @return A {@link PollerFlux} that polls the training operation until it has completed, has failed,
 * or has been cancelled. The completed operation returns a {@link CustomFormModel}.
 * @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels, TrainingFileFilter trainingFileFilter, Duration pollInterval) {
    // Named locals make the filter handling explicit instead of repeating inline ternaries.
    final Duration interval = (pollInterval == null) ? DEFAULT_DURATION : pollInterval;
    final boolean includeSubFolders = trainingFileFilter != null && trainingFileFilter.isIncludeSubFolders();
    final String prefix = (trainingFileFilter == null) ? null : trainingFileFilter.getPrefix();
    return new PollerFlux<>(
        interval,
        getTrainingActivationOperation(trainingFilesUrl, includeSubFolders, prefix, useTrainingLabels),
        createTrainingPollOperation(),
        (activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchTrainingModelResultOperation());
}
/**
* Get detailed information for a specified custom model id.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModel
*
* @param modelId The UUID string format model identifier.
*
* @return The detailed information for the specified model.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CustomFormModel> getCustomModel(String modelId) {
return getCustomModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Get detailed information for a specified custom model id with Http response
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Response} containing the requested {@link CustomFormModel model}.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId) {
try {
return withContext(context -> getCustomModelWithResponse(modelId, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId, Context context) {
Objects.requireNonNull(modelId, "'modelId' cannot be null");
return service.getCustomModelWithResponseAsync(UUID.fromString(modelId), true, context)
.map(response -> new SimpleResponse<>(response, toCustomFormModel(response.getValue())));
}
/**
* Get account information for all custom models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountProperties}
*
* @return The account information.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AccountProperties> getAccountProperties() {
return getAccountPropertiesWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Get account information.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountPropertiesWithResponse}
*
* @return A {@link Response} containing the requested account information details.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AccountProperties>> getAccountPropertiesWithResponse() {
try {
return withContext(context -> getAccountPropertiesWithResponse(context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<AccountProperties>> getAccountPropertiesWithResponse(Context context) {
return service.getCustomModelsWithResponseAsync(context)
.map(response -> new SimpleResponse<>(response,
new AccountProperties(response.getValue().getSummary().getCount(),
response.getValue().getSummary().getLimit())));
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModel
*
* @param modelId The UUID string format model identifier.
*
* @return An empty Mono.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteModel(String modelId) {
return deleteModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Mono} containing containing status code and HTTP headers
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteModelWithResponse(String modelId) {
try {
return withContext(context -> deleteModelWithResponse(modelId, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context) {
Objects.requireNonNull(modelId, "'modelId' cannot be null");
return service.deleteCustomModelWithResponseAsync(UUID.fromString(modelId), context)
.map(response -> new SimpleResponse<>(response, null));
}
/**
* List information for all models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.listCustomModels}
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<CustomFormModelInfo> listCustomModels() {
try {
return new PagedFlux<>(() -> withContext(context -> listFirstPageModelInfo(context)),
continuationToken -> withContext(context -> listNextPageModelInfo(continuationToken, context)));
} catch (RuntimeException ex) {
return new PagedFlux<>(() -> monoError(logger, ex));
}
}
/**
* List information for all models with taking {@link Context}.
*
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
PagedFlux<CustomFormModelInfo> listCustomModels(Context context) {
return new PagedFlux<>(() -> listFirstPageModelInfo(context),
continuationToken -> listNextPageModelInfo(continuationToken, context));
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
CopyAuthorization target) {
return beginCopyModel(modelId, target, null);
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
CopyAuthorization target, Duration pollInterval) {
final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
return new PollerFlux<OperationResult, CustomFormModelInfo>(
interval,
getCopyActivationOperation(modelId, target),
createCopyPollOperation(modelId),
(activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
fetchCopyModelResultOperation(modelId, target.getModelId()));
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported
* by Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorization
*
* @return The {@link CopyAuthorization} that could be used to authorize copying model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CopyAuthorization> getCopyAuthorization(String resourceId, String resourceRegion) {
return getCopyAuthorizationWithResponse(resourceId, resourceRegion).flatMap(FluxUtil::toMono);
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
* This should be called by the target resource (where the model will be copied to) and the output can be passed as
* the target parameter into {@link FormTrainingAsyncClient
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported by
* Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorizationWithResponse
*
* @return A {@link Response} containing the {@link CopyAuthorization} that could be used to authorize copying
* model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId,
String resourceRegion) {
try {
return withContext(context -> getCopyAuthorizationWithResponse(resourceId, resourceRegion, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId, String resourceRegion,
Context context) {
Objects.requireNonNull(resourceId, "'resourceId' cannot be null");
Objects.requireNonNull(resourceRegion, "'resourceRegion' cannot be null");
return service.generateModelCopyAuthorizationWithResponseAsync(context)
.map(response -> {
CopyAuthorizationResult copyAuthorizationResult = response.getValue();
return new SimpleResponse<>(response, new CopyAuthorization(copyAuthorizationResult.getModelId(),
copyAuthorizationResult.getAccessToken(), resourceId, resourceRegion,
copyAuthorizationResult.getExpirationDateTimeTicks()));
});
}
// Fetches the first page of custom-model listings and maps the service response into
// a PagedResponseBase of CustomFormModelInfo, preserving request/status/headers/token.
private Mono<PagedResponse<CustomFormModelInfo>> listFirstPageModelInfo(Context context) {
    return service.listCustomModelsSinglePageAsync(context)
        .doOnRequest(ignoredValue -> logger.info("Listing information for all models"))
        .doOnSuccess(response -> logger.info("Listed all models"))
        .doOnError(error -> logger.warning("Failed to list all models information", error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}

// Fetches a continuation page identified by nextPageLink; completes empty when the
// continuation token is absent (end of listing).
// NOTE(review): this method logs via doOnSubscribe while listFirstPageModelInfo uses
// doOnRequest — presumably unintentional; confirm whether the hooks should match.
private Mono<PagedResponse<CustomFormModelInfo>> listNextPageModelInfo(String nextPageLink, Context context) {
    if (CoreUtils.isNullOrEmpty(nextPageLink)) {
        return Mono.empty();
    }
    return service.listCustomModelsNextSinglePageAsync(nextPageLink, context)
        .doOnSubscribe(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink))
        .doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink))
        .doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink,
            error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
// Builds the final-result fetcher for the copy-model poller: once polling completes,
// it retrieves the copy-operation result and converts it into a CustomFormModelInfo
// describing the new (copied) model.
private Function<PollingContext<OperationResult>, Mono<CustomFormModelInfo>> fetchCopyModelResultOperation(
    String modelId, String copyModelId) {
    return (pollingContext) -> {
        try {
            // Result id was stashed in the activation response's OperationResult.
            final UUID resultUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            // NOTE(review): the null check runs after pollingContext has already been used;
            // consider validating modelId at the top of the lambda.
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), resultUid)
                .map(modelSimpleResponse -> {
                    CopyOperationResult copyOperationResult = modelSimpleResponse.getValue();
                    // Surfaces an exception when the copy operation reports a failed status.
                    throwIfCopyOperationStatusInvalid(copyOperationResult);
                    // SUCCEEDED maps to READY; any other status is passed through by name.
                    return new CustomFormModelInfo(copyModelId,
                        copyOperationResult.getStatus() == OperationStatus.SUCCEEDED
                            ? CustomFormModelStatus.READY
                            : CustomFormModelStatus.fromString(copyOperationResult.getStatus().toString()),
                        copyOperationResult.getCreatedDateTime(),
                        copyOperationResult.getLastUpdatedDateTime());
                });
        } catch (RuntimeException ex) {
            // Synchronous failures (bad UUID, null modelId) are logged and surfaced reactively.
            return monoError(logger, ex);
        }
    };
}
// Builds the poll operation for the copy-model poller: each poll re-queries the
// copy result and translates its status into a PollResponse.
// NOTE(review): only HttpResponseException is caught here, yet the synchronous part
// (UUID.fromString) throws IllegalArgumentException — getCopyActivationOperation
// catches RuntimeException instead; confirm whether the narrower catch is intended.
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createCopyPollOperation(String modelId) {
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            // The activation step stored the copy-result id in the poll response value.
            UUID targetId = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), targetId)
                .flatMap(modelSimpleResponse ->
                    processCopyModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException ex) {
            // Service failures are logged and propagated as an error signal.
            return monoError(logger, ex);
        }
    };
}
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getCopyActivationOperation(
String modelId, CopyAuthorization target) {
return (pollingContext) -> {
try {
Objects.requireNonNull(modelId, "'modelId' cannot be null.");
Objects.requireNonNull(target, "'target' cannot be null.");
CopyRequest copyRequest = new CopyRequest()
.setTargetResourceId(target.getResourceId())
.setTargetResourceRegion(target.getResourceRegion())
.setCopyAuthorization(new CopyAuthorizationResult()
.setModelId(target.getModelId())
.setAccessToken(target.getAccessToken())
.setExpirationDateTimeTicks(target.getExpiresOn()));
return service.copyCustomModelWithResponseAsync(UUID.fromString(modelId), copyRequest)
.map(response ->
new OperationResult(parseModelId(response.getDeserializedHeaders().getOperationLocation())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
/**
 * Translates a copy-operation service response into a {@link PollResponse}, mapping the
 * service status onto the long-running-operation status while preserving the original
 * poll response value.
 */
private Mono<PollResponse<OperationResult>> processCopyModelResponse(
    SimpleResponse<CopyOperationResult> copyModel,
    PollResponse<OperationResult> copyModelOperationResponse) {
    final OperationStatus serviceStatus = copyModel.getValue().getStatus();
    final LongRunningOperationStatus status;
    if (serviceStatus == OperationStatus.NOT_STARTED || serviceStatus == OperationStatus.RUNNING) {
        status = LongRunningOperationStatus.IN_PROGRESS;
    } else if (serviceStatus == OperationStatus.SUCCEEDED) {
        status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    } else if (serviceStatus == OperationStatus.FAILED) {
        status = LongRunningOperationStatus.FAILED;
    } else {
        // Unknown statuses are passed through by name and treated as terminal.
        status = LongRunningOperationStatus.fromString(serviceStatus.toString(), true);
    }
    return Mono.just(new PollResponse<>(status, copyModelOperationResponse.getValue()));
}
private Function<PollingContext<OperationResult>, Mono<CustomFormModel>> fetchTrainingModelResultOperation() {
return (pollingContext) -> {
try {
final UUID modelUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
return service.getCustomModelWithResponseAsync(modelUid, true)
.map(modelSimpleResponse -> {
throwIfModelStatusInvalid(modelSimpleResponse.getValue());
return toCustomFormModel(modelSimpleResponse.getValue());
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
/**
 * Poll step of the training poller: re-reads the model and maps its status onto the poller's
 * long-running-operation status.
 *
 * @return The function invoked on every poll tick.
 */
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createTrainingPollOperation() {
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            UUID modelUid = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .flatMap(modelSimpleResponse ->
                    processTrainingModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException e) {
            // NOTE(review): unlike createCopyPollOperation, a service error here ends polling
            // with FAILED and a null value rather than propagating the exception — confirm
            // this asymmetry is intentional.
            logger.logExceptionAsError(e);
            return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null));
        }
    };
}
/**
 * Activation step of the training poller: submits the train request and captures the operation
 * id (parsed from the returned Location header) for subsequent polling.
 *
 * @param trainingFilesUrl Externally accessible Azure blob container URL with training documents.
 * @param includeSubFolders Whether documents in sub folders are included.
 * @param filePrefix Prefix filter applied to source documents; may be null.
 * @param useTrainingLabels Whether to train using a label file.
 * @return The function invoked once to start the long-running operation.
 */
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getTrainingActivationOperation(
    String trainingFilesUrl, boolean includeSubFolders, String filePrefix, boolean useTrainingLabels) {
    return (pollingContext) -> {
        try {
            Objects.requireNonNull(trainingFilesUrl, "'trainingFilesUrl' cannot be null.");
            TrainSourceFilter trainSourceFilter = new TrainSourceFilter().setIncludeSubFolders(includeSubFolders)
                .setPrefix(filePrefix);
            TrainRequest serviceTrainRequest = new TrainRequest().setSource(trainingFilesUrl).
                setSourceFilter(trainSourceFilter).setUseLabelFile(useTrainingLabels);
            return service.trainCustomModelAsyncWithResponseAsync(serviceTrainRequest)
                .map(response ->
                    // The Location header addresses the created model; extract its id.
                    new OperationResult(parseModelId(response.getDeserializedHeaders().getLocation())));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Maps the service-reported training model status onto the azure-core long-running-operation
 * status, carrying the current poll value forward unchanged.
 *
 * @param trainingModel Service response holding the current model info.
 * @param trainingModelOperationResponse The latest poll response; its value is reused as-is.
 * @return A {@link Mono} emitting the next {@link PollResponse} for the poller.
 */
private static Mono<PollResponse<OperationResult>> processTrainingModelResponse(
    SimpleResponse<Model> trainingModel,
    PollResponse<OperationResult> trainingModelOperationResponse) {
    LongRunningOperationStatus status;
    switch (trainingModel.getValue().getModelInfo().getStatus()) {
        case CREATING:
            status = LongRunningOperationStatus.IN_PROGRESS;
            break;
        case READY:
            status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            break;
        case INVALID:
            status = LongRunningOperationStatus.FAILED;
            break;
        default:
            // Unrecognized statuses are surfaced verbatim and marked complete so the
            // poller does not loop forever on an unknown state.
            status = LongRunningOperationStatus.fromString(
                trainingModel.getValue().getModelInfo().getStatus().toString(), true);
            break;
    }
    return Mono.just(new PollResponse<>(status, trainingModelOperationResponse.getValue()));
}
/**
 * Helper method that throws a {@link HttpResponseException} if the {@link CopyOperationResult}
 * returned by the service has a failed {@link OperationStatus}.
 *
 * @param copyResult The copy operation response returned from the service.
 */
private void throwIfCopyOperationStatusInvalid(CopyOperationResult copyResult) {
    if (copyResult.getStatus().equals(OperationStatus.FAILED)) {
        List<ErrorInformation> errorInformationList = copyResult.getCopyResult().getErrors();
        if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
            // Only raise when the service supplied error details; a FAILED status with no
            // errors falls through silently.
            throw logger.logExceptionAsError(new HttpResponseException("Copy operation returned with a failed "
                + "status", null, errorInformationList));
        }
    }
}
/**
 * Helper method that throws a {@link HttpResponseException} if the {@link ModelInfo} of the
 * returned model has an invalid
 * {@link com.azure.ai.formrecognizer.implementation.models.ModelStatus}.
 *
 * @param customModel The response returned from the service.
 */
} | class FormTrainingAsyncClient {
private final ClientLogger logger = new ClientLogger(FormTrainingAsyncClient.class);
private final FormRecognizerClientImpl service;
private final FormRecognizerServiceVersion serviceVersion;
/**
* Create a {@link FormTrainingClient} that sends requests to the Form Recognizer service's endpoint.
* Each service call goes through the {@link FormTrainingClientBuilder
*
* @param service The proxy service used to perform REST calls.
* @param serviceVersion The versions of Azure Form Recognizer supported by this client library.
*/
FormTrainingAsyncClient(FormRecognizerClientImpl service, FormRecognizerServiceVersion serviceVersion) {
    this.service = service;
    // NOTE(review): serviceVersion is stored but not read by any method visible here;
    // presumably used for per-version request shaping elsewhere.
    this.serviceVersion = serviceVersion;
}
/**
* Creates a new {@link FormRecognizerAsyncClient} object. The new {@link FormTrainingAsyncClient}
* uses the same request policy pipeline as the {@link FormTrainingAsyncClient}.
*
* @return A new {@link FormRecognizerAsyncClient} object.
*/
public FormRecognizerAsyncClient getFormRecognizerAsyncClient() {
    // Reuse this client's endpoint and pipeline so both clients share policies and auth.
    final FormRecognizerClientBuilder builder = new FormRecognizerClientBuilder()
        .endpoint(getEndpoint())
        .pipeline(getHttpPipeline());
    return builder.buildAsyncClient();
}
/**
* Gets the pipeline the client is using.
*
* @return the pipeline the client is using.
*/
HttpPipeline getHttpPipeline() {
    // Package-private accessor; used when constructing sibling clients with the same pipeline.
    return service.getHttpPipeline();
}
/**
* Gets the endpoint the client is using.
*
* @return the endpoint the client is using.
*/
String getEndpoint() {
    // Package-private accessor; used when constructing sibling clients against the same endpoint.
    return service.getEndpoint();
}
/**
* Create and train a custom model.
* Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
* Other type of content is ignored.
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl source URL parameter that is either an externally accessible Azure
* storage blob container Uri (preferably a Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
*
* @return A {@link PollerFlux} that polls the training model operation until it has completed, has failed, or has
* been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels) {
    // Convenience overload: no file filter, default poll interval.
    return beginTraining(trainingFilesUrl, useTrainingLabels, null, null);
}
/**
* Create and train a custom model.
* <p>Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.Other type of content is ignored.
* </p>
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl an externally accessible Azure storage blob container Uri (preferably a
* Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
* @param trainingFileFilter Filter to apply to the documents in the source path for training.
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the extract receipt operation until it
* has completed, has failed, or has been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels, TrainingFileFilter trainingFileFilter, Duration pollInterval) {
    // Fall back to the client-wide default when no poll interval is supplied.
    final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
    // A null filter means "no filtering": train on every document in the container.
    // (`x != null && x.isIncludeSubFolders()` replaces the redundant `? ... : false` ternary.)
    final boolean includeSubFolders = trainingFileFilter != null && trainingFileFilter.isIncludeSubFolders();
    final String filePrefix = trainingFileFilter != null ? trainingFileFilter.getPrefix() : null;
    return new PollerFlux<OperationResult, CustomFormModel>(
        interval,
        getTrainingActivationOperation(trainingFilesUrl, includeSubFolders, filePrefix, useTrainingLabels),
        createTrainingPollOperation(),
        // The service does not support cancelling training, so cancellation always errors.
        (activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchTrainingModelResultOperation());
}
/**
* Get detailed information for a specified custom model id.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModel
*
* @param modelId The UUID string format model identifier.
*
* @return The detailed information for the specified model.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CustomFormModel> getCustomModel(String modelId) {
    // Delegate to the WithResponse variant and unwrap the REST response body.
    return getCustomModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Get detailed information for a specified custom model id with Http response
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Response} containing the requested {@link CustomFormModel model}.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId) {
    try {
        // Bind the subscriber's reactor Context and delegate to the context-aware overload.
        return withContext(ctx -> getCustomModelWithResponse(modelId, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the Mono instead of throwing.
        return monoError(logger, ex);
    }
}
Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId, Context context) {
    Objects.requireNonNull(modelId, "'modelId' cannot be null");
    // The service API addresses models by UUID; a malformed id fails here synchronously.
    final UUID modelUid = UUID.fromString(modelId);
    return service.getCustomModelWithResponseAsync(modelUid, true, context)
        .map(res -> new SimpleResponse<>(res, toCustomFormModel(res.getValue())));
}
/**
* Get account information for all custom models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountProperties}
*
* @return The account information.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AccountProperties> getAccountProperties() {
    // Delegate to the WithResponse variant and unwrap the REST response body.
    return getAccountPropertiesWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Get account information.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountPropertiesWithResponse}
*
* @return A {@link Response} containing the requested account information details.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AccountProperties>> getAccountPropertiesWithResponse() {
    try {
        // Bind the subscriber's reactor Context and delegate to the context-aware overload.
        return withContext(this::getAccountPropertiesWithResponse);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the Mono instead of throwing.
        return monoError(logger, ex);
    }
}
Mono<Response<AccountProperties>> getAccountPropertiesWithResponse(Context context) {
    // The models summary carries the account-wide custom-model count and limit.
    return service.getCustomModelsWithResponseAsync(context)
        .map(res -> {
            AccountProperties accountProperties = new AccountProperties(
                res.getValue().getSummary().getCount(),
                res.getValue().getSummary().getLimit());
            return new SimpleResponse<>(res, accountProperties);
        });
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModel
*
* @param modelId The UUID string format model identifier.
*
* @return An empty Mono.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteModel(String modelId) {
    // Delegate to the WithResponse variant and discard the REST envelope.
    return deleteModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Mono} containing containing status code and HTTP headers
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteModelWithResponse(String modelId) {
    try {
        // Bind the subscriber's reactor Context and delegate to the context-aware overload.
        return withContext(ctx -> deleteModelWithResponse(modelId, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the Mono instead of throwing.
        return monoError(logger, ex);
    }
}
Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context) {
    Objects.requireNonNull(modelId, "'modelId' cannot be null");
    // The service API addresses models by UUID; a malformed id fails here synchronously.
    final UUID modelUid = UUID.fromString(modelId);
    return service.deleteCustomModelWithResponseAsync(modelUid, context)
        .map(res -> new SimpleResponse<>(res, null));
}
/**
* List information for all models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.listCustomModels}
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<CustomFormModelInfo> listCustomModels() {
    try {
        // First-page and next-page retrievers, each bound to the subscriber's Context.
        return new PagedFlux<>(
            () -> withContext(this::listFirstPageModelInfo),
            token -> withContext(ctx -> listNextPageModelInfo(token, ctx)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the paged stream instead of throwing.
        return new PagedFlux<>(() -> monoError(logger, ex));
    }
}
/**
* List information for all models with taking {@link Context}.
*
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
PagedFlux<CustomFormModelInfo> listCustomModels(Context context) {
    // Context-aware variant used by the synchronous client wrapper.
    return new PagedFlux<>(() -> listFirstPageModelInfo(context),
        continuationToken -> listNextPageModelInfo(continuationToken, context));
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target) {
    // Convenience overload: default poll interval.
    return beginCopyModel(modelId, target, null);
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target, Duration pollInterval) {
    // Fall back to the client-wide default when no poll interval is supplied.
    final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
    return new PollerFlux<OperationResult, CustomFormModelInfo>(
        interval,
        getCopyActivationOperation(modelId, target),
        createCopyPollOperation(modelId),
        // The service does not support cancelling a copy, so cancellation always errors.
        (activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        // The final result reports the target's model id from the copy authorization.
        fetchCopyModelResultOperation(modelId, target.getModelId()));
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported
* by Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorization
*
* @return The {@link CopyAuthorization} that could be used to authorize copying model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CopyAuthorization> getCopyAuthorization(String resourceId, String resourceRegion) {
    // Delegate to the WithResponse variant and unwrap the REST response body.
    return getCopyAuthorizationWithResponse(resourceId, resourceRegion).flatMap(FluxUtil::toMono);
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
* This should be called by the target resource (where the model will be copied to) and the output can be passed as
* the target parameter into {@link FormTrainingAsyncClient
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported by
* Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorizationWithResponse
*
* @return A {@link Response} containing the {@link CopyAuthorization} that could be used to authorize copying
* model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId,
    String resourceRegion) {
    try {
        // Bind the subscriber's reactor Context and delegate to the context-aware overload.
        return withContext(ctx -> getCopyAuthorizationWithResponse(resourceId, resourceRegion, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the Mono instead of throwing.
        return monoError(logger, ex);
    }
}
Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId, String resourceRegion,
    Context context) {
    Objects.requireNonNull(resourceId, "'resourceId' cannot be null");
    Objects.requireNonNull(resourceRegion, "'resourceRegion' cannot be null");
    return service.generateModelCopyAuthorizationWithResponseAsync(context)
        .map(response -> {
            CopyAuthorizationResult copyAuthorizationResult = response.getValue();
            // The service issues the model id/access token/expiry; resource id and region are
            // echoed from the caller so the authorization is self-describing for the source.
            return new SimpleResponse<>(response, new CopyAuthorization(copyAuthorizationResult.getModelId(),
                copyAuthorizationResult.getAccessToken(), resourceId, resourceRegion,
                copyAuthorizationResult.getExpirationDateTimeTicks()));
        });
}
/**
 * Fetches the first page of model summaries and converts it to the public page type.
 *
 * @param context Additional context passed through the HTTP pipeline.
 * @return The first {@link PagedResponse} of {@link CustomFormModelInfo}.
 */
private Mono<PagedResponse<CustomFormModelInfo>> listFirstPageModelInfo(Context context) {
    return service.listCustomModelsSinglePageAsync(context)
        .doOnRequest(ignoredValue -> logger.info("Listing information for all models"))
        .doOnSuccess(response -> logger.info("Listed all models"))
        .doOnError(error -> logger.warning("Failed to list all models information", error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            // The continuation token drives retrieval of subsequent pages.
            res.getContinuationToken(),
            null));
}
/**
 * Fetches a subsequent page of model summaries identified by a continuation link.
 *
 * @param nextPageLink Continuation link from the previous page; empty ends pagination.
 * @param context Additional context passed through the HTTP pipeline.
 * @return The next {@link PagedResponse}, or an empty Mono when there is no next page.
 */
private Mono<PagedResponse<CustomFormModelInfo>> listNextPageModelInfo(String nextPageLink, Context context) {
    if (CoreUtils.isNullOrEmpty(nextPageLink)) {
        // No continuation token means pagination is finished.
        return Mono.empty();
    }
    return service.listCustomModelsNextSinglePageAsync(nextPageLink, context)
        .doOnSubscribe(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink))
        .doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink))
        .doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink,
            error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
/**
 * Final fetch step of the copy poller: reads the copy result, validates it, and builds the
 * {@link CustomFormModelInfo} describing the copied (target) model.
 *
 * @param modelId Source model id being copied.
 * @param copyModelId Target model id (from the copy authorization) reported in the result.
 * @return The function the poller invokes to produce the final copy result.
 */
private Function<PollingContext<OperationResult>, Mono<CustomFormModelInfo>> fetchCopyModelResultOperation(
    String modelId, String copyModelId) {
    return (pollingContext) -> {
        try {
            // The result id recorded at activation time identifies the copy operation.
            final UUID resultUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), resultUid)
                .map(modelSimpleResponse -> {
                    CopyOperationResult copyOperationResult = modelSimpleResponse.getValue();
                    // Fail loudly when the service reports errors for a failed copy.
                    throwIfCopyOperationStatusInvalid(copyOperationResult);
                    return new CustomFormModelInfo(copyModelId,
                        copyOperationResult.getStatus() == OperationStatus.SUCCEEDED
                            ? CustomFormModelStatus.READY
                            : CustomFormModelStatus.fromString(copyOperationResult.getStatus().toString()),
                        copyOperationResult.getCreatedDateTime(),
                        copyOperationResult.getLastUpdatedDateTime());
                });
        } catch (RuntimeException ex) {
            // Synchronous failures (e.g. malformed UUID) are funneled into the returned Mono.
            return monoError(logger, ex);
        }
    };
}
/**
 * Poll step of the copy poller: re-reads the copy result and maps its status onto the poller's
 * long-running-operation status.
 *
 * @param modelId Source model id being copied.
 * @return The function invoked on every poll tick.
 */
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createCopyPollOperation(String modelId) {
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            UUID targetId = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), targetId)
                .flatMap(modelSimpleResponse ->
                    processCopyModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException ex) {
            // Service errors during polling are propagated through the returned Mono.
            return monoError(logger, ex);
        }
    };
}
/**
 * Activation step of the copy poller: submits the copy request, attaching the target's copy
 * authorization, and captures the operation id parsed from the Operation-Location header.
 *
 * @param modelId Source model id being copied.
 * @param target Copy authorization generated by the target Form Recognizer resource.
 * @return The function invoked once to start the long-running operation.
 */
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getCopyActivationOperation(
    String modelId, CopyAuthorization target) {
    return (pollingContext) -> {
        try {
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            Objects.requireNonNull(target, "'target' cannot be null.");
            CopyRequest copyRequest = new CopyRequest()
                .setTargetResourceId(target.getResourceId())
                .setTargetResourceRegion(target.getResourceRegion())
                .setCopyAuthorization(new CopyAuthorizationResult()
                    .setModelId(target.getModelId())
                    .setAccessToken(target.getAccessToken())
                    .setExpirationDateTimeTicks(target.getExpiresOn()));
            return service.copyCustomModelWithResponseAsync(UUID.fromString(modelId), copyRequest)
                .map(response ->
                    // The Operation-Location header addresses the copy operation; parse its id.
                    new OperationResult(parseModelId(response.getDeserializedHeaders().getOperationLocation())));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Maps the service-reported copy status onto the azure-core long-running-operation status,
 * carrying the current poll value forward unchanged.
 *
 * @param copyModel Service response holding the current {@link CopyOperationResult}.
 * @param copyModelOperationResponse The latest poll response; its value is reused as-is.
 * @return A {@link Mono} emitting the next {@link PollResponse} for the poller.
 */
private Mono<PollResponse<OperationResult>> processCopyModelResponse(
    SimpleResponse<CopyOperationResult> copyModel,
    PollResponse<OperationResult> copyModelOperationResponse) {
    LongRunningOperationStatus status;
    switch (copyModel.getValue().getStatus()) {
        case NOT_STARTED:
        case RUNNING:
            // Pre-start and in-flight both map to a single "in progress" poller state.
            status = LongRunningOperationStatus.IN_PROGRESS;
            break;
        case SUCCEEDED:
            status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            break;
        case FAILED:
            status = LongRunningOperationStatus.FAILED;
            break;
        default:
            // Unrecognized service statuses are surfaced verbatim and marked complete.
            status = LongRunningOperationStatus.fromString(copyModel.getValue().getStatus().toString(), true);
            break;
    }
    return Mono.just(new PollResponse<>(status, copyModelOperationResponse.getValue()));
}
/**
 * Final fetch step of the training poller: retrieves the trained model, validates its status,
 * and converts it to the public {@link CustomFormModel} type.
 *
 * @return The function the poller invokes to produce the final training result.
 */
private Function<PollingContext<OperationResult>, Mono<CustomFormModel>> fetchTrainingModelResultOperation() {
    return (pollingContext) -> {
        try {
            // The result id recorded at activation time is the model id to fetch.
            final UUID modelUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .map(modelSimpleResponse -> {
                    // Fail loudly instead of handing back a model flagged invalid by the service.
                    throwIfModelStatusInvalid(modelSimpleResponse.getValue());
                    return toCustomFormModel(modelSimpleResponse.getValue());
                });
        } catch (RuntimeException ex) {
            // Synchronous failures (e.g. malformed UUID) are funneled into the returned Mono.
            return monoError(logger, ex);
        }
    };
}
/**
 * Poll step of the training poller: re-reads the model and maps its status onto the poller's
 * long-running-operation status.
 *
 * @return The function invoked on every poll tick.
 */
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createTrainingPollOperation() {
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            UUID modelUid = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .flatMap(modelSimpleResponse ->
                    processTrainingModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException e) {
            // NOTE(review): unlike the copy poller, a service error here ends polling with
            // FAILED and a null value rather than propagating — confirm intended.
            logger.logExceptionAsError(e);
            return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null));
        }
    };
}
/**
 * Activation step of the training poller: submits the train request and captures the operation
 * id (parsed from the returned Location header) for subsequent polling.
 *
 * @param trainingFilesUrl Externally accessible Azure blob container URL with training documents.
 * @param includeSubFolders Whether documents in sub folders are included.
 * @param filePrefix Prefix filter applied to source documents; may be null.
 * @param useTrainingLabels Whether to train using a label file.
 * @return The function invoked once to start the long-running operation.
 */
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getTrainingActivationOperation(
    String trainingFilesUrl, boolean includeSubFolders, String filePrefix, boolean useTrainingLabels) {
    return (pollingContext) -> {
        try {
            Objects.requireNonNull(trainingFilesUrl, "'trainingFilesUrl' cannot be null.");
            TrainSourceFilter trainSourceFilter = new TrainSourceFilter().setIncludeSubFolders(includeSubFolders)
                .setPrefix(filePrefix);
            TrainRequest serviceTrainRequest = new TrainRequest().setSource(trainingFilesUrl).
                setSourceFilter(trainSourceFilter).setUseLabelFile(useTrainingLabels);
            return service.trainCustomModelAsyncWithResponseAsync(serviceTrainRequest)
                .map(response ->
                    // The Location header addresses the created model; extract its id.
                    new OperationResult(parseModelId(response.getDeserializedHeaders().getLocation())));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Maps the service-reported training model status onto the azure-core long-running-operation
 * status, carrying the current poll value forward unchanged.
 *
 * @param trainingModel Service response holding the current model info.
 * @param trainingModelOperationResponse The latest poll response; its value is reused as-is.
 * @return A {@link Mono} emitting the next {@link PollResponse} for the poller.
 */
private static Mono<PollResponse<OperationResult>> processTrainingModelResponse(
    SimpleResponse<Model> trainingModel,
    PollResponse<OperationResult> trainingModelOperationResponse) {
    LongRunningOperationStatus status;
    switch (trainingModel.getValue().getModelInfo().getStatus()) {
        case CREATING:
            status = LongRunningOperationStatus.IN_PROGRESS;
            break;
        case READY:
            status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            break;
        case INVALID:
            status = LongRunningOperationStatus.FAILED;
            break;
        default:
            // Unrecognized statuses are surfaced verbatim and marked complete.
            status = LongRunningOperationStatus.fromString(
                trainingModel.getValue().getModelInfo().getStatus().toString(), true);
            break;
    }
    return Mono.just(new PollResponse<>(status, trainingModelOperationResponse.getValue()));
}
/**
 * Helper method that throws a {@link HttpResponseException} if the {@link CopyOperationResult}
 * returned by the service has a failed {@link OperationStatus}.
 *
 * @param copyResult The copy operation response returned from the service.
 */
private void throwIfCopyOperationStatusInvalid(CopyOperationResult copyResult) {
    if (copyResult.getStatus().equals(OperationStatus.FAILED)) {
        List<ErrorInformation> errorInformationList = copyResult.getCopyResult().getErrors();
        if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
            // Only raise when the service supplied error details; a FAILED status with no
            // errors falls through silently.
            throw logger.logExceptionAsError(new HttpResponseException("Copy operation returned with a failed "
                + "status", null, errorInformationList));
        }
    }
}
/**
 * Helper method that throws a {@link HttpResponseException} if the {@link ModelInfo} of the
 * returned model has an invalid
 * {@link com.azure.ai.formrecognizer.implementation.models.ModelStatus}.
 *
 * @param customModel The response returned from the service.
 */
} |
For failures for service requests (3XX, 4XX etc) we have [ErrorResponseException](https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/formrecognizer/azure-ai-formrecognizer/src/main/java/com/azure/ai/formrecognizer/implementation/FormRecognizerClientImpl.java#L143). Using the equivalent for other languages C#- [`RequestFailedException`](https://github.com/Azure/azure-sdk-for-net/blob/master/sdk/formrecognizer/Azure.AI.FormRecognizer/src/CopyModelOperation.cs#L163) and Python - [`HttpResponseError`](https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py#L111) | private void throwIfModelStatusInvalid(Model customModel) {
if (ModelStatus.INVALID.equals(customModel.getModelInfo().getStatus())) {
List<ErrorInformation> errorInformationList = customModel.getTrainResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException(
String.format("Invalid model created with ID: %s", customModel.getModelInfo().getModelId()),
null, errorInformationList));
}
}
} | throw logger.logExceptionAsError(new HttpResponseException( | private void throwIfModelStatusInvalid(Model customModel) {
if (ModelStatus.INVALID.equals(customModel.getModelInfo().getStatus())) {
List<ErrorInformation> errorInformationList = customModel.getTrainResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException(
String.format("Invalid model created with ID: %s", customModel.getModelInfo().getModelId()),
null, errorInformationList));
}
}
} | class FormTrainingAsyncClient {
private final ClientLogger logger = new ClientLogger(FormTrainingAsyncClient.class);
private final FormRecognizerClientImpl service;
private final FormRecognizerServiceVersion serviceVersion;
/**
* Create a {@link FormTrainingClient} that sends requests to the Form Recognizer service's endpoint.
* Each service call goes through the {@link FormTrainingClientBuilder
*
* @param service The proxy service used to perform REST calls.
* @param serviceVersion The versions of Azure Form Recognizer supported by this client library.
*/
FormTrainingAsyncClient(FormRecognizerClientImpl service, FormRecognizerServiceVersion serviceVersion) {
    this.service = service;
    // NOTE(review): serviceVersion is stored but not read by any method visible here;
    // presumably used for per-version request shaping elsewhere.
    this.serviceVersion = serviceVersion;
}
/**
* Creates a new {@link FormRecognizerAsyncClient} object. The new {@link FormTrainingAsyncClient}
* uses the same request policy pipeline as the {@link FormTrainingAsyncClient}.
*
* @return A new {@link FormRecognizerAsyncClient} object.
*/
public FormRecognizerAsyncClient getFormRecognizerAsyncClient() {
    // Reuse this client's endpoint and pipeline so both clients share policies and auth.
    return new FormRecognizerClientBuilder().endpoint(getEndpoint()).pipeline(getHttpPipeline()).buildAsyncClient();
}
/**
* Gets the pipeline the client is using.
*
* @return the pipeline the client is using.
*/
HttpPipeline getHttpPipeline() {
    // Package-private accessor; used when constructing sibling clients with the same pipeline.
    return service.getHttpPipeline();
}
/**
* Gets the endpoint the client is using.
*
* @return the endpoint the client is using.
*/
String getEndpoint() {
    // Package-private accessor; used when constructing sibling clients against the same endpoint.
    return service.getEndpoint();
}
/**
* Create and train a custom model.
* Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
* Other type of content is ignored.
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl source URL parameter that is either an externally accessible Azure
* storage blob container Uri (preferably a Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
*
* @return A {@link PollerFlux} that polls the training model operation until it has completed, has failed, or has
* been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels) {
    // Convenience overload: no file filter, default poll interval.
    return beginTraining(trainingFilesUrl, useTrainingLabels, null, null);
}
/**
* Create and train a custom model.
* <p>Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.Other type of content is ignored.
* </p>
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl an externally accessible Azure storage blob container Uri (preferably a
* Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
* @param trainingFileFilter Filter to apply to the documents in the source path for training.
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the extract receipt operation until it
* has completed, has failed, or has been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
boolean useTrainingLabels, TrainingFileFilter trainingFileFilter, Duration pollInterval) {
final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
return new PollerFlux<OperationResult, CustomFormModel>(
interval,
getTrainingActivationOperation(trainingFilesUrl,
trainingFileFilter != null ? trainingFileFilter.isIncludeSubFolders() : false,
trainingFileFilter != null ? trainingFileFilter.getPrefix() : null,
useTrainingLabels),
createTrainingPollOperation(),
(activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
fetchTrainingModelResultOperation());
}
/**
* Get detailed information for a specified custom model id.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModel
*
* @param modelId The UUID string format model identifier.
*
* @return The detailed information for the specified model.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<CustomFormModel> getCustomModel(String modelId) {
        // Unwrap the Response<T> variant down to just the model payload.
        return getCustomModelWithResponse(modelId).flatMap(FluxUtil::toMono);
    }
/**
* Get detailed information for a specified custom model id with Http response
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Response} containing the requested {@link CustomFormModel model}.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId) {
        try {
            // Capture the subscriber's reactor Context and forward it to the service call.
            return withContext(context -> getCustomModelWithResponse(modelId, context));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    }
Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId, Context context) {
Objects.requireNonNull(modelId, "'modelId' cannot be null");
return service.getCustomModelWithResponseAsync(UUID.fromString(modelId), true, context)
.map(response -> new SimpleResponse<>(response, toCustomFormModel(response.getValue())));
}
/**
* Get account information for all custom models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountProperties}
*
* @return The account information.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<AccountProperties> getAccountProperties() {
        // Unwrap the Response<T> variant down to just the account properties payload.
        return getAccountPropertiesWithResponse().flatMap(FluxUtil::toMono);
    }
/**
* Get account information.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountPropertiesWithResponse}
*
* @return A {@link Response} containing the requested account information details.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AccountProperties>> getAccountPropertiesWithResponse() {
try {
return withContext(context -> getAccountPropertiesWithResponse(context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<AccountProperties>> getAccountPropertiesWithResponse(Context context) {
return service.getCustomModelsWithResponseAsync(context)
.map(response -> new SimpleResponse<>(response,
new AccountProperties(response.getValue().getSummary().getCount(),
response.getValue().getSummary().getLimit())));
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModel
*
* @param modelId The UUID string format model identifier.
*
* @return An empty Mono.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Void> deleteModel(String modelId) {
        // Discard the Response wrapper; callers only observe completion or error.
        return deleteModelWithResponse(modelId).flatMap(FluxUtil::toMono);
    }
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Mono} containing containing status code and HTTP headers
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> deleteModelWithResponse(String modelId) {
        try {
            // Capture the subscriber's reactor Context and forward it to the service call.
            return withContext(context -> deleteModelWithResponse(modelId, context));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    }
Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context) {
Objects.requireNonNull(modelId, "'modelId' cannot be null");
return service.deleteCustomModelWithResponseAsync(UUID.fromString(modelId), context)
.map(response -> new SimpleResponse<>(response, null));
}
/**
* List information for all models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.listCustomModels}
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<CustomFormModelInfo> listCustomModels() {
        try {
            // First page and continuation pages each capture the subscriber's reactor Context.
            return new PagedFlux<>(() -> withContext(context -> listFirstPageModelInfo(context)),
                continuationToken -> withContext(context -> listNextPageModelInfo(continuationToken, context)));
        } catch (RuntimeException ex) {
            // Surface synchronous construction failures through the returned flux.
            return new PagedFlux<>(() -> monoError(logger, ex));
        }
    }
/**
* List information for all models with taking {@link Context}.
*
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
    PagedFlux<CustomFormModelInfo> listCustomModels(Context context) {
        // Context-aware variant used by the sync client; the caller supplies the Context
        // directly instead of capturing it from the subscriber.
        return new PagedFlux<>(() -> listFirstPageModelInfo(context),
            continuationToken -> listNextPageModelInfo(continuationToken, context));
    }
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
        CopyAuthorization target) {
        // Convenience overload: default poll interval.
        return beginCopyModel(modelId, target, null);
    }
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
        CopyAuthorization target, Duration pollInterval) {
        // Fall back to the default interval when the caller did not specify one.
        final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
        return new PollerFlux<OperationResult, CustomFormModelInfo>(
            interval,
            // Activation kicks off the copy; polling and the final fetch both query the
            // copy-result endpoint of the *source* model id.
            getCopyActivationOperation(modelId, target),
            createCopyPollOperation(modelId),
            // The service cannot cancel a copy operation once it has started.
            (activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
            fetchCopyModelResultOperation(modelId, target.getModelId()));
    }
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported
* by Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorization
*
* @return The {@link CopyAuthorization} that could be used to authorize copying model between resources.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<CopyAuthorization> getCopyAuthorization(String resourceId, String resourceRegion) {
        // Unwrap the Response<T> variant down to just the authorization payload.
        return getCopyAuthorizationWithResponse(resourceId, resourceRegion).flatMap(FluxUtil::toMono);
    }
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
* This should be called by the target resource (where the model will be copied to) and the output can be passed as
* the target parameter into {@link FormTrainingAsyncClient
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported by
* Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorizationWithResponse
*
* @return A {@link Response} containing the {@link CopyAuthorization} that could be used to authorize copying
* model between resources.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId,
        String resourceRegion) {
        try {
            // Capture the subscriber's reactor Context and forward it to the service call.
            return withContext(context -> getCopyAuthorizationWithResponse(resourceId, resourceRegion, context));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    }
Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId, String resourceRegion,
Context context) {
Objects.requireNonNull(resourceId, "'resourceId' cannot be null");
Objects.requireNonNull(resourceRegion, "'resourceRegion' cannot be null");
return service.generateModelCopyAuthorizationWithResponseAsync(context)
.map(response -> {
CopyAuthorizationResult copyAuthorizationResult = response.getValue();
return new SimpleResponse<>(response, new CopyAuthorization(copyAuthorizationResult.getModelId(),
copyAuthorizationResult.getAccessToken(), resourceId, resourceRegion,
copyAuthorizationResult.getExpirationDateTimeTicks()));
});
}
    // Fetches the first page of model summaries and adapts it to a PagedResponse.
    private Mono<PagedResponse<CustomFormModelInfo>> listFirstPageModelInfo(Context context) {
        return service.listCustomModelsSinglePageAsync(context)
            // NOTE(review): this uses doOnRequest while listNextPageModelInfo uses
            // doOnSubscribe for the analogous log line — confirm which is intended.
            .doOnRequest(ignoredValue -> logger.info("Listing information for all models"))
            .doOnSuccess(response -> logger.info("Listed all models"))
            .doOnError(error -> logger.warning("Failed to list all models information", error))
            .map(res -> new PagedResponseBase<>(
                res.getRequest(),
                res.getStatusCode(),
                res.getHeaders(),
                toCustomFormModelInfo(res.getValue()),
                res.getContinuationToken(),
                null));
    }
    // Fetches a continuation page of model summaries; an empty/null link means the
    // listing is exhausted, so complete without a service call.
    private Mono<PagedResponse<CustomFormModelInfo>> listNextPageModelInfo(String nextPageLink, Context context) {
        if (CoreUtils.isNullOrEmpty(nextPageLink)) {
            return Mono.empty();
        }
        return service.listCustomModelsNextSinglePageAsync(nextPageLink, context)
            .doOnSubscribe(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink))
            .doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink))
            .doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink,
                error))
            .map(res -> new PagedResponseBase<>(
                res.getRequest(),
                res.getStatusCode(),
                res.getHeaders(),
                toCustomFormModelInfo(res.getValue()),
                res.getContinuationToken(),
                null));
    }
    // Final-result operation for the copy poller: once polling reports completion,
    // fetch the copy result from the *source* model and build the info for the
    // *target* (copied) model id.
    private Function<PollingContext<OperationResult>, Mono<CustomFormModelInfo>> fetchCopyModelResultOperation(
        String modelId, String copyModelId) {
        return (pollingContext) -> {
            try {
                // The result id was stashed in the poll response during activation/polling.
                final UUID resultUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
                Objects.requireNonNull(modelId, "'modelId' cannot be null.");
                return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), resultUid)
                    .map(modelSimpleResponse -> {
                        CopyOperationResult copyOperationResult = modelSimpleResponse.getValue();
                        // Raise a descriptive error if the copy ended in FAILED with errors.
                        throwIfCopyOperationStatusInvalid(copyOperationResult);
                        return new CustomFormModelInfo(copyModelId,
                            // SUCCEEDED maps to READY; other statuses are forwarded by name.
                            copyOperationResult.getStatus() == OperationStatus.SUCCEEDED
                                ? CustomFormModelStatus.READY
                                : CustomFormModelStatus.fromString(copyOperationResult.getStatus().toString()),
                            copyOperationResult.getCreatedDateTime(),
                            copyOperationResult.getLastUpdatedDateTime());
                    });
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        };
    }
    // Poll operation for the copy poller: queries the copy result of the source model
    // and maps the service status onto the long-running-operation status.
    private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
        createCopyPollOperation(String modelId) {
        return (pollingContext) -> {
            try {
                PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
                UUID targetId = UUID.fromString(operationResultPollResponse.getValue().getResultId());
                return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), targetId)
                    .flatMap(modelSimpleResponse ->
                        processCopyModelResponse(modelSimpleResponse, operationResultPollResponse));
            } catch (HttpResponseException ex) {
                // NOTE(review): the training poll operation converts this case into a FAILED
                // poll response instead of an error Mono — confirm the asymmetry is intended.
                return monoError(logger, ex);
            }
        };
    }
    // Activation operation for the copy poller: submits the copy request built from the
    // target authorization, then extracts the operation id from the Operation-Location header.
    private Function<PollingContext<OperationResult>, Mono<OperationResult>> getCopyActivationOperation(
        String modelId, CopyAuthorization target) {
        return (pollingContext) -> {
            try {
                Objects.requireNonNull(modelId, "'modelId' cannot be null.");
                Objects.requireNonNull(target, "'target' cannot be null.");
                CopyRequest copyRequest = new CopyRequest()
                    .setTargetResourceId(target.getResourceId())
                    .setTargetResourceRegion(target.getResourceRegion())
                    .setCopyAuthorization(new CopyAuthorizationResult()
                        .setModelId(target.getModelId())
                        .setAccessToken(target.getAccessToken())
                        .setExpirationDateTimeTicks(target.getExpiresOn()))
                return service.copyCustomModelWithResponseAsync(UUID.fromString(modelId), copyRequest)
                    .map(response ->
                        new OperationResult(parseModelId(response.getDeserializedHeaders().getOperationLocation())));
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        };
    }
private Mono<PollResponse<OperationResult>> processCopyModelResponse(
SimpleResponse<CopyOperationResult> copyModel,
PollResponse<OperationResult> copyModelOperationResponse) {
LongRunningOperationStatus status;
switch (copyModel.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case FAILED:
status = LongRunningOperationStatus.FAILED;
break;
default:
status = LongRunningOperationStatus.fromString(copyModel.getValue().getStatus().toString(), true);
break;
}
return Mono.just(new PollResponse<>(status, copyModelOperationResponse.getValue()));
}
    // Final-result operation for the training poller: once polling reports completion,
    // fetch the full model (with key details) and convert it to the public type.
    private Function<PollingContext<OperationResult>, Mono<CustomFormModel>> fetchTrainingModelResultOperation() {
        return (pollingContext) -> {
            try {
                // The model id was stashed in the poll response during activation.
                final UUID modelUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
                return service.getCustomModelWithResponseAsync(modelUid, true)
                    .map(modelSimpleResponse -> {
                        // Raise a descriptive error if training ended in an invalid state.
                        throwIfModelStatusInvalid(modelSimpleResponse.getValue());
                        return toCustomFormModel(modelSimpleResponse.getValue());
                    });
            } catch (RuntimeException ex) {
                return monoError(logger, ex);
            }
        };
    }
    // Poll operation for the training poller: fetches the model and maps its training
    // status onto the long-running-operation status.
    private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
        createTrainingPollOperation() {
        return (pollingContext) -> {
            try {
                PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
                UUID modelUid = UUID.fromString(operationResultPollResponse.getValue().getResultId());
                return service.getCustomModelWithResponseAsync(modelUid, true)
                    .flatMap(modelSimpleResponse ->
                        processTrainingModelResponse(modelSimpleResponse, operationResultPollResponse));
            } catch (HttpResponseException e) {
                // A service error during polling is logged and reported as a FAILED poll
                // response (with no value) rather than as an error signal.
                logger.logExceptionAsError(e);
                return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null));
            }
        };
    }
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getTrainingActivationOperation(
String trainingFilesUrl, boolean includeSubFolders, String filePrefix, boolean useTrainingLabels) {
return (pollingContext) -> {
try {
Objects.requireNonNull(trainingFilesUrl, "'trainingFilesUrl' cannot be null.");
TrainSourceFilter trainSourceFilter = new TrainSourceFilter().setIncludeSubFolders(includeSubFolders)
.setPrefix(filePrefix);
TrainRequest serviceTrainRequest = new TrainRequest().setSource(trainingFilesUrl).
setSourceFilter(trainSourceFilter).setUseLabelFile(useTrainingLabels);
return service.trainCustomModelAsyncWithResponseAsync(serviceTrainRequest)
.map(response ->
new OperationResult(parseModelId(response.getDeserializedHeaders().getLocation())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
    // Maps the model's training status onto the generic long-running-operation status,
    // preserving the original OperationResult as the poll response value.
    private static Mono<PollResponse<OperationResult>> processTrainingModelResponse(
        SimpleResponse<Model> trainingModel,
        PollResponse<OperationResult> trainingModelOperationResponse) {
        LongRunningOperationStatus status;
        switch (trainingModel.getValue().getModelInfo().getStatus()) {
            case CREATING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case READY:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case INVALID:
                status = LongRunningOperationStatus.FAILED;
                break;
            default:
                // Forward any status this client does not recognize by name; the boolean
                // presumably marks it as complete (azure-core fromString(name, isComplete)).
                status = LongRunningOperationStatus.fromString(
                    trainingModel.getValue().getModelInfo().getStatus().toString(), true);
                break;
        }
        return Mono.just(new PollResponse<>(status, trainingModelOperationResponse.getValue()));
    }
/**
* Helper method that throws a {@link HttpResponseException} if {@link CopyOperationResult
* {@link OperationStatus
*
* @param copyResult The copy operation response returned from the service.
*/
private void throwIfCopyOperationStatusInvalid(CopyOperationResult copyResult) {
if (copyResult.getStatus().equals(OperationStatus.FAILED)) {
List<ErrorInformation> errorInformationList = copyResult.getCopyResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException("Copy operation returned with a failed "
+ "status", null, errorInformationList));
}
}
}
/**
* Helper method that throws a {@link HttpResponseException} if {@link ModelInfo
* {@link com.azure.ai.formrecognizer.implementation.models.ModelStatus
*
* @param customModel The response returned from the service.
*/
} | class FormTrainingAsyncClient {
    // Client-scoped logger used for request tracing and error reporting.
    private final ClientLogger logger = new ClientLogger(FormTrainingAsyncClient.class);
    // Generated REST proxy that performs the actual service calls.
    private final FormRecognizerClientImpl service;
    // Service API version this client was configured with.
    private final FormRecognizerServiceVersion serviceVersion;
/**
* Create a {@link FormTrainingClient} that sends requests to the Form Recognizer service's endpoint.
* Each service call goes through the {@link FormTrainingClientBuilder
*
* @param service The proxy service used to perform REST calls.
* @param serviceVersion The versions of Azure Form Recognizer supported by this client library.
*/
    FormTrainingAsyncClient(FormRecognizerClientImpl service, FormRecognizerServiceVersion serviceVersion) {
        // Package-private: instances are created by the client builder, which supplies
        // the generated REST proxy and the negotiated service API version.
        this.service = service;
        this.serviceVersion = serviceVersion;
    }
/**
* Creates a new {@link FormRecognizerAsyncClient} object. The new {@link FormTrainingAsyncClient}
* uses the same request policy pipeline as the {@link FormTrainingAsyncClient}.
*
* @return A new {@link FormRecognizerAsyncClient} object.
*/
    public FormRecognizerAsyncClient getFormRecognizerAsyncClient() {
        // Share this client's endpoint and HTTP pipeline with the new client.
        return new FormRecognizerClientBuilder().endpoint(getEndpoint()).pipeline(getHttpPipeline()).buildAsyncClient();
    }
/**
* Gets the pipeline the client is using.
*
* @return the pipeline the client is using.
*/
    HttpPipeline getHttpPipeline() {
        // Delegates to the generated service client, which owns the pipeline.
        return service.getHttpPipeline();
    }
/**
* Gets the endpoint the client is using.
*
* @return the endpoint the client is using.
*/
    String getEndpoint() {
        // Delegates to the generated service client, which owns the endpoint.
        return service.getEndpoint();
    }
/**
* Create and train a custom model.
* Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
* Other type of content is ignored.
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl source URL parameter that is either an externally accessible Azure
* storage blob container Uri (preferably a Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
*
* @return A {@link PollerFlux} that polls the training model operation until it has completed, has failed, or has
* been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
        boolean useTrainingLabels) {
        // Convenience overload: no training-file filter and the default poll interval.
        return beginTraining(trainingFilesUrl, useTrainingLabels, null, null);
    }
/**
* Create and train a custom model.
* <p>Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.Other type of content is ignored.
* </p>
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl an externally accessible Azure storage blob container Uri (preferably a
* Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
* @param trainingFileFilter Filter to apply to the documents in the source path for training.
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the extract receipt operation until it
* has completed, has failed, or has been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
boolean useTrainingLabels, TrainingFileFilter trainingFileFilter, Duration pollInterval) {
final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
return new PollerFlux<OperationResult, CustomFormModel>(
interval,
getTrainingActivationOperation(trainingFilesUrl,
trainingFileFilter != null ? trainingFileFilter.isIncludeSubFolders() : false,
trainingFileFilter != null ? trainingFileFilter.getPrefix() : null,
useTrainingLabels),
createTrainingPollOperation(),
(activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
fetchTrainingModelResultOperation());
}
/**
* Get detailed information for a specified custom model id.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModel
*
* @param modelId The UUID string format model identifier.
*
* @return The detailed information for the specified model.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<CustomFormModel> getCustomModel(String modelId) {
        // Unwrap the Response<T> variant down to just the model payload.
        return getCustomModelWithResponse(modelId).flatMap(FluxUtil::toMono);
    }
/**
* Get detailed information for a specified custom model id with Http response
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Response} containing the requested {@link CustomFormModel model}.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId) {
        try {
            // Capture the subscriber's reactor Context and forward it to the service call.
            return withContext(context -> getCustomModelWithResponse(modelId, context));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    }
    // Context-aware variant used by both the public async overload and the sync client.
    Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId, Context context) {
        Objects.requireNonNull(modelId, "'modelId' cannot be null");
        // UUID.fromString fails fast with IllegalArgumentException on a malformed id.
        return service.getCustomModelWithResponseAsync(UUID.fromString(modelId), true, context)
            .map(response -> new SimpleResponse<>(response, toCustomFormModel(response.getValue())));
    }
/**
* Get account information for all custom models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountProperties}
*
* @return The account information.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<AccountProperties> getAccountProperties() {
        // Unwrap the Response<T> variant down to just the account properties payload.
        return getAccountPropertiesWithResponse().flatMap(FluxUtil::toMono);
    }
/**
* Get account information.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountPropertiesWithResponse}
*
* @return A {@link Response} containing the requested account information details.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AccountProperties>> getAccountPropertiesWithResponse() {
        try {
            // Capture the subscriber's reactor Context and forward it to the service call.
            return withContext(context -> getAccountPropertiesWithResponse(context));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    }
    // Context-aware variant; the custom-models listing response carries the
    // account-wide summary (current model count and account limit).
    Mono<Response<AccountProperties>> getAccountPropertiesWithResponse(Context context) {
        return service.getCustomModelsWithResponseAsync(context)
            .map(response -> new SimpleResponse<>(response,
                new AccountProperties(response.getValue().getSummary().getCount(),
                    response.getValue().getSummary().getLimit())));
    }
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModel
*
* @param modelId The UUID string format model identifier.
*
* @return An empty Mono.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Void> deleteModel(String modelId) {
        // Discard the Response wrapper; callers only observe completion or error.
        return deleteModelWithResponse(modelId).flatMap(FluxUtil::toMono);
    }
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Mono} containing containing status code and HTTP headers
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> deleteModelWithResponse(String modelId) {
        try {
            // Capture the subscriber's reactor Context and forward it to the service call.
            return withContext(context -> deleteModelWithResponse(modelId, context));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    }
    // Context-aware variant used by both the public async overload and the sync client.
    Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context) {
        Objects.requireNonNull(modelId, "'modelId' cannot be null");
        // UUID.fromString fails fast with IllegalArgumentException on a malformed id.
        return service.deleteCustomModelWithResponseAsync(UUID.fromString(modelId), context)
            .map(response -> new SimpleResponse<>(response, null));
    }
/**
* List information for all models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.listCustomModels}
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<CustomFormModelInfo> listCustomModels() {
        try {
            // First page and continuation pages each capture the subscriber's reactor Context.
            return new PagedFlux<>(() -> withContext(context -> listFirstPageModelInfo(context)),
                continuationToken -> withContext(context -> listNextPageModelInfo(continuationToken, context)));
        } catch (RuntimeException ex) {
            // Surface synchronous construction failures through the returned flux.
            return new PagedFlux<>(() -> monoError(logger, ex));
        }
    }
/**
* List information for all models with taking {@link Context}.
*
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
    PagedFlux<CustomFormModelInfo> listCustomModels(Context context) {
        // Context-aware variant used by the sync client; the caller supplies the Context
        // directly instead of capturing it from the subscriber.
        return new PagedFlux<>(() -> listFirstPageModelInfo(context),
            continuationToken -> listNextPageModelInfo(continuationToken, context));
    }
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target) {
    // Delegate with a null poll interval; the overload substitutes the default polling duration.
    return beginCopyModel(modelId, target, null);
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target, Duration pollInterval) {
    // Substitute the default polling duration when the caller supplied none.
    final Duration interval = (pollInterval == null) ? DEFAULT_DURATION : pollInterval;
    return new PollerFlux<>(
        interval,
        getCopyActivationOperation(modelId, target),
        createCopyPollOperation(modelId),
        (activationResponse, pollingContext) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchCopyModelResultOperation(modelId, target.getModelId()));
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported
* by Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorization
*
* @return The {@link CopyAuthorization} that could be used to authorize copying model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CopyAuthorization> getCopyAuthorization(String resourceId, String resourceRegion) {
    // Unwrap the Response body; an empty response completes the Mono without a value.
    return getCopyAuthorizationWithResponse(resourceId, resourceRegion).flatMap(FluxUtil::toMono);
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
* This should be called by the target resource (where the model will be copied to) and the output can be passed as
* the target parameter into {@link FormTrainingAsyncClient
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported by
* Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorizationWithResponse
*
* @return A {@link Response} containing the {@link CopyAuthorization} that could be used to authorize copying
* model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId,
    String resourceRegion) {
    try {
        return withContext(context -> getCopyAuthorizationWithResponse(resourceId, resourceRegion, context));
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono so subscribers observe them uniformly.
        return monoError(logger, ex);
    }
}
Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId, String resourceRegion,
    Context context) {
    Objects.requireNonNull(resourceId, "'resourceId' cannot be null");
    Objects.requireNonNull(resourceRegion, "'resourceRegion' cannot be null");
    // Combine the service-issued authorization token with the caller-supplied target resource details.
    return service.generateModelCopyAuthorizationWithResponseAsync(context)
        .map(response -> {
            final CopyAuthorizationResult authorizationResult = response.getValue();
            final CopyAuthorization copyAuthorization = new CopyAuthorization(
                authorizationResult.getModelId(), authorizationResult.getAccessToken(), resourceId,
                resourceRegion, authorizationResult.getExpirationDateTimeTicks());
            return new SimpleResponse<>(response, copyAuthorization);
        });
}
private Mono<PagedResponse<CustomFormModelInfo>> listFirstPageModelInfo(Context context) {
    // Fetch the first page of model summaries and adapt the generated page type to the public one.
    return service.listCustomModelsSinglePageAsync(context)
        .doOnRequest(ignoredValue -> logger.info("Listing information for all models"))
        .doOnSuccess(response -> logger.info("Listed all models"))
        .doOnError(error -> logger.warning("Failed to list all models information", error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
private Mono<PagedResponse<CustomFormModelInfo>> listNextPageModelInfo(String nextPageLink, Context context) {
    // A null/empty continuation token means there are no further pages to fetch.
    if (CoreUtils.isNullOrEmpty(nextPageLink)) {
        return Mono.empty();
    }
    return service.listCustomModelsNextSinglePageAsync(nextPageLink, context)
        .doOnSubscribe(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink))
        .doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink))
        .doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink,
            error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
private Function<PollingContext<OperationResult>, Mono<CustomFormModelInfo>> fetchCopyModelResultOperation(
    String modelId, String copyModelId) {
    // Final-result fetcher for the copy poller: reads the finished copy operation and maps it to
    // the info of the newly copied model (identified by copyModelId on the target resource).
    return (pollingContext) -> {
        try {
            final UUID resultUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), resultUid)
                .map(modelSimpleResponse -> {
                    CopyOperationResult copyOperationResult = modelSimpleResponse.getValue();
                    // Raises HttpResponseException when the service reports a failed copy with errors.
                    throwIfCopyOperationStatusInvalid(copyOperationResult);
                    return new CustomFormModelInfo(copyModelId,
                        copyOperationResult.getStatus() == OperationStatus.SUCCEEDED
                            ? CustomFormModelStatus.READY
                            : CustomFormModelStatus.fromString(copyOperationResult.getStatus().toString()),
                        copyOperationResult.getCreatedDateTime(),
                        copyOperationResult.getLastUpdatedDateTime());
                });
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createCopyPollOperation(String modelId) {
    // Poll operation: re-queries the copy result and converts the service status into a
    // LongRunningOperationStatus for the poller.
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            UUID targetId = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), targetId)
                .flatMap(modelSimpleResponse ->
                    processCopyModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException ex) {
            // NOTE(review): only HttpResponseException is translated here; other runtime failures
            // (e.g. a malformed UUID) propagate out of the lambda — confirm this is intentional.
            return monoError(logger, ex);
        }
    };
}
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getCopyActivationOperation(
    String modelId, CopyAuthorization target) {
    // Activation operation: submits the copy request and captures the result id parsed from the
    // Operation-Location response header for subsequent polling.
    return (pollingContext) -> {
        try {
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            Objects.requireNonNull(target, "'target' cannot be null.");
            // Re-pack the public CopyAuthorization into the generated transport model.
            CopyRequest copyRequest = new CopyRequest()
                .setTargetResourceId(target.getResourceId())
                .setTargetResourceRegion(target.getResourceRegion())
                .setCopyAuthorization(new CopyAuthorizationResult()
                    .setModelId(target.getModelId())
                    .setAccessToken(target.getAccessToken())
                    .setExpirationDateTimeTicks(target.getExpiresOn()))
            return service.copyCustomModelWithResponseAsync(UUID.fromString(modelId), copyRequest)
                .map(response ->
                    new OperationResult(parseModelId(response.getDeserializedHeaders().getOperationLocation())));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
private Mono<PollResponse<OperationResult>> processCopyModelResponse(
    SimpleResponse<CopyOperationResult> copyModel,
    PollResponse<OperationResult> copyModelOperationResponse) {
    // Translate the service-side copy status into the poller's long-running-operation status.
    final OperationStatus operationStatus = copyModel.getValue().getStatus();
    final LongRunningOperationStatus status;
    if (operationStatus == OperationStatus.NOT_STARTED || operationStatus == OperationStatus.RUNNING) {
        status = LongRunningOperationStatus.IN_PROGRESS;
    } else if (operationStatus == OperationStatus.SUCCEEDED) {
        status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    } else if (operationStatus == OperationStatus.FAILED) {
        status = LongRunningOperationStatus.FAILED;
    } else {
        // Statuses this client does not recognize are mapped to custom terminal states
        // (the 'true' flag marks them complete).
        status = LongRunningOperationStatus.fromString(operationStatus.toString(), true);
    }
    return Mono.just(new PollResponse<>(status, copyModelOperationResponse.getValue()));
}
private Function<PollingContext<OperationResult>, Mono<CustomFormModel>> fetchTrainingModelResultOperation() {
    // Final-result fetcher for the training poller: loads the trained model and fails the Mono
    // with HttpResponseException when the model finished in the INVALID state.
    return (pollingContext) -> {
        try {
            final UUID modelUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .map(modelSimpleResponse -> {
                    throwIfModelStatusInvalid(modelSimpleResponse.getValue());
                    return toCustomFormModel(modelSimpleResponse.getValue());
                });
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createTrainingPollOperation() {
    // Poll operation: re-queries the model under training and maps its status for the poller.
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            UUID modelUid = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .flatMap(modelSimpleResponse ->
                    processTrainingModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException e) {
            // Unlike the copy poller, HTTP failures here mark the long-running operation FAILED
            // (with a null value) rather than erroring the Mono.
            logger.logExceptionAsError(e);
            return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null));
        }
    };
}
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getTrainingActivationOperation(
    String trainingFilesUrl, boolean includeSubFolders, String filePrefix, boolean useTrainingLabels) {
    // Activation operation: submits the training request and captures the model id parsed from
    // the Location response header for subsequent polling.
    return pollingContext -> {
        try {
            Objects.requireNonNull(trainingFilesUrl, "'trainingFilesUrl' cannot be null.");
            // Describe which documents under the source URL participate in training.
            final TrainSourceFilter sourceFilter = new TrainSourceFilter()
                .setIncludeSubFolders(includeSubFolders)
                .setPrefix(filePrefix);
            final TrainRequest trainRequest = new TrainRequest()
                .setSource(trainingFilesUrl)
                .setSourceFilter(sourceFilter)
                .setUseLabelFile(useTrainingLabels);
            return service.trainCustomModelAsyncWithResponseAsync(trainRequest)
                .map(response ->
                    new OperationResult(parseModelId(response.getDeserializedHeaders().getLocation())));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
private static Mono<PollResponse<OperationResult>> processTrainingModelResponse(
    SimpleResponse<Model> trainingModel,
    PollResponse<OperationResult> trainingModelOperationResponse) {
    // Translate the service-side model status into the poller's long-running-operation status.
    final ModelStatus modelStatus = trainingModel.getValue().getModelInfo().getStatus();
    final LongRunningOperationStatus status;
    if (modelStatus == ModelStatus.CREATING) {
        status = LongRunningOperationStatus.IN_PROGRESS;
    } else if (modelStatus == ModelStatus.READY) {
        status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    } else if (modelStatus == ModelStatus.INVALID) {
        status = LongRunningOperationStatus.FAILED;
    } else {
        // Statuses this client does not recognize are mapped to custom terminal states
        // (the 'true' flag marks them complete).
        status = LongRunningOperationStatus.fromString(modelStatus.toString(), true);
    }
    return Mono.just(new PollResponse<>(status, trainingModelOperationResponse.getValue()));
}
/**
 * Throws an {@link HttpResponseException} when the {@link CopyOperationResult} status is
 * {@code FAILED} and the service attached error details.
 *
 * @param copyResult The copy operation response returned from the service.
 */
private void throwIfCopyOperationStatusInvalid(CopyOperationResult copyResult) {
    // Constant-first comparison is null-safe (getStatus() could be null on a malformed
    // response) and consistent with throwIfModelStatusInvalid's style.
    if (OperationStatus.FAILED.equals(copyResult.getStatus())) {
        List<ErrorInformation> errorInformationList = copyResult.getCopyResult().getErrors();
        // A FAILED status without error details does not raise; callers observe the status instead.
        if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
            throw logger.logExceptionAsError(new HttpResponseException("Copy operation returned with a failed "
                + "status", null, errorInformationList));
        }
    }
}
/**
* Helper method that throws a {@link HttpResponseException} if {@link ModelInfo
* {@link com.azure.ai.formrecognizer.implementation.models.ModelStatus
*
* @param customModel The response returned from the service.
*/
} |
Discussed offline. Maintaining the Javadoc for `HttpResponseException` and TA convention for exception handling, consider adding a new Exception Type or use AzureException. https://github.com/Azure/azure-sdk-for-java/issues/11705 | private void throwIfModelStatusInvalid(Model customModel) {
if (ModelStatus.INVALID.equals(customModel.getModelInfo().getStatus())) {
List<ErrorInformation> errorInformationList = customModel.getTrainResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException(
String.format("Invalid model created with ID: %s", customModel.getModelInfo().getModelId()),
null, errorInformationList));
}
}
} | throw logger.logExceptionAsError(new HttpResponseException( | private void throwIfModelStatusInvalid(Model customModel) {
if (ModelStatus.INVALID.equals(customModel.getModelInfo().getStatus())) {
List<ErrorInformation> errorInformationList = customModel.getTrainResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException(
String.format("Invalid model created with ID: %s", customModel.getModelInfo().getModelId()),
null, errorInformationList));
}
}
} | class FormTrainingAsyncClient {
private final ClientLogger logger = new ClientLogger(FormTrainingAsyncClient.class);
private final FormRecognizerClientImpl service;
private final FormRecognizerServiceVersion serviceVersion;
/**
* Create a {@link FormTrainingClient} that sends requests to the Form Recognizer service's endpoint.
* Each service call goes through the {@link FormTrainingClientBuilder
*
* @param service The proxy service used to perform REST calls.
* @param serviceVersion The versions of Azure Form Recognizer supported by this client library.
*/
FormTrainingAsyncClient(FormRecognizerClientImpl service, FormRecognizerServiceVersion serviceVersion) {
    // Package-private: instances are created by the builder, which supplies the proxy and version.
    this.service = service;
    this.serviceVersion = serviceVersion;
}
/**
* Creates a new {@link FormRecognizerAsyncClient} object. The new {@link FormTrainingAsyncClient}
* uses the same request policy pipeline as the {@link FormTrainingAsyncClient}.
*
* @return A new {@link FormRecognizerAsyncClient} object.
*/
public FormRecognizerAsyncClient getFormRecognizerAsyncClient() {
    // Share this client's endpoint and pipeline so both clients are configured identically.
    return new FormRecognizerClientBuilder()
        .endpoint(getEndpoint())
        .pipeline(getHttpPipeline())
        .buildAsyncClient();
}
/**
* Gets the pipeline the client is using.
*
* @return the pipeline the client is using.
*/
HttpPipeline getHttpPipeline() {
    // Package-private so sibling clients can reuse the configured pipeline.
    return service.getHttpPipeline();
}
/**
* Gets the endpoint the client is using.
*
* @return the endpoint the client is using.
*/
String getEndpoint() {
    // Package-private so sibling clients can reuse the configured endpoint.
    return service.getEndpoint();
}
/**
* Create and train a custom model.
* Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
* Other type of content is ignored.
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl source URL parameter that is either an externally accessible Azure
* storage blob container Uri (preferably a Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
*
* @return A {@link PollerFlux} that polls the training model operation until it has completed, has failed, or has
* been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels) {
    // Delegate with no file filter and a null poll interval (the overload applies the default).
    return beginTraining(trainingFilesUrl, useTrainingLabels, null, null);
}
/**
* Create and train a custom model.
* <p>Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.Other type of content is ignored.
* </p>
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl an externally accessible Azure storage blob container Uri (preferably a
* Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
* @param trainingFileFilter Filter to apply to the documents in the source path for training.
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the extract receipt operation until it
* has completed, has failed, or has been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels, TrainingFileFilter trainingFileFilter, Duration pollInterval) {
    // Substitute the default polling duration when the caller supplied none.
    final Duration interval = (pollInterval == null) ? DEFAULT_DURATION : pollInterval;
    // Unpack the optional filter once instead of repeating the null checks inline.
    final boolean includeSubFolders = trainingFileFilter != null && trainingFileFilter.isIncludeSubFolders();
    final String filePrefix = (trainingFileFilter == null) ? null : trainingFileFilter.getPrefix();
    return new PollerFlux<>(
        interval,
        getTrainingActivationOperation(trainingFilesUrl, includeSubFolders, filePrefix, useTrainingLabels),
        createTrainingPollOperation(),
        (activationResponse, pollingContext) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchTrainingModelResultOperation());
}
/**
* Get detailed information for a specified custom model id.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModel
*
* @param modelId The UUID string format model identifier.
*
* @return The detailed information for the specified model.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CustomFormModel> getCustomModel(String modelId) {
    // Unwrap the Response body; an empty response completes the Mono without a value.
    return getCustomModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Get detailed information for a specified custom model id with Http response
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Response} containing the requested {@link CustomFormModel model}.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId) {
    try {
        return withContext(context -> getCustomModelWithResponse(modelId, context));
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono so subscribers observe them uniformly.
        return monoError(logger, ex);
    }
}
Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId, Context context) {
    Objects.requireNonNull(modelId, "'modelId' cannot be null");
    // NOTE(review): the boolean flag is passed as true everywhere in this file — confirm its
    // meaning against the generated client before changing it.
    return service.getCustomModelWithResponseAsync(UUID.fromString(modelId), true, context)
        .map(modelResponse -> {
            final CustomFormModel customFormModel = toCustomFormModel(modelResponse.getValue());
            return new SimpleResponse<>(modelResponse, customFormModel);
        });
}
/**
* Get account information for all custom models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountProperties}
*
* @return The account information.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AccountProperties> getAccountProperties() {
    // Unwrap the Response body; an empty response completes the Mono without a value.
    return getAccountPropertiesWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Get account information.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountPropertiesWithResponse}
*
* @return A {@link Response} containing the requested account information details.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AccountProperties>> getAccountPropertiesWithResponse() {
    try {
        return withContext(context -> getAccountPropertiesWithResponse(context));
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono so subscribers observe them uniformly.
        return monoError(logger, ex);
    }
}
Mono<Response<AccountProperties>> getAccountPropertiesWithResponse(Context context) {
    // Reduce the service's model summary to the public count/limit pair.
    return service.getCustomModelsWithResponseAsync(context)
        .map(response -> {
            final AccountProperties accountProperties = new AccountProperties(
                response.getValue().getSummary().getCount(),
                response.getValue().getSummary().getLimit());
            return new SimpleResponse<>(response, accountProperties);
        });
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModel
*
* @param modelId The UUID string format model identifier.
*
* @return An empty Mono.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteModel(String modelId) {
    // Discard the response wrapper; completion alone signals a successful delete.
    return deleteModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Mono} containing containing status code and HTTP headers
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteModelWithResponse(String modelId) {
    try {
        return withContext(context -> deleteModelWithResponse(modelId, context));
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono so subscribers observe them uniformly.
        return monoError(logger, ex);
    }
}
Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context) {
    Objects.requireNonNull(modelId, "'modelId' cannot be null");
    // Keep only the status code and headers; the delete response carries no body.
    return service.deleteCustomModelWithResponseAsync(UUID.fromString(modelId), context)
        .map(deleteResponse -> new SimpleResponse<>(deleteResponse, null));
}
/**
* List information for all models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.listCustomModels}
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<CustomFormModelInfo> listCustomModels() {
    try {
        // The first-page retrieval needs no captured state, so a method reference replaces the
        // redundant lambda; the continuation lambda must capture the token and stays a lambda.
        return new PagedFlux<>(() -> withContext(this::listFirstPageModelInfo),
            continuationToken -> withContext(context -> listNextPageModelInfo(continuationToken, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous construction failures through the paged stream instead of throwing.
        return new PagedFlux<>(() -> monoError(logger, ex));
    }
}
/**
* List information for all models with taking {@link Context}.
*
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
PagedFlux<CustomFormModelInfo> listCustomModels(Context context) {
    // Internal variant that threads a caller-supplied Context through every page retrieval.
    return new PagedFlux<>(
        () -> listFirstPageModelInfo(context),
        nextLink -> listNextPageModelInfo(nextLink, context));
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target) {
    // Delegate with a null poll interval; the overload substitutes the default polling duration.
    return beginCopyModel(modelId, target, null);
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target, Duration pollInterval) {
    // Substitute the default polling duration when the caller supplied none.
    final Duration interval = (pollInterval == null) ? DEFAULT_DURATION : pollInterval;
    return new PollerFlux<>(
        interval,
        getCopyActivationOperation(modelId, target),
        createCopyPollOperation(modelId),
        (activationResponse, pollingContext) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchCopyModelResultOperation(modelId, target.getModelId()));
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported
* by Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorization
*
* @return The {@link CopyAuthorization} that could be used to authorize copying model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CopyAuthorization> getCopyAuthorization(String resourceId, String resourceRegion) {
    // Unwrap the Response body; an empty response completes the Mono without a value.
    return getCopyAuthorizationWithResponse(resourceId, resourceRegion).flatMap(FluxUtil::toMono);
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
* This should be called by the target resource (where the model will be copied to) and the output can be passed as
* the target parameter into {@link FormTrainingAsyncClient
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported by
* Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorizationWithResponse
*
* @return A {@link Response} containing the {@link CopyAuthorization} that could be used to authorize copying
* model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId,
    String resourceRegion) {
    try {
        return withContext(context -> getCopyAuthorizationWithResponse(resourceId, resourceRegion, context));
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono so subscribers observe them uniformly.
        return monoError(logger, ex);
    }
}
Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId, String resourceRegion,
    Context context) {
    Objects.requireNonNull(resourceId, "'resourceId' cannot be null");
    Objects.requireNonNull(resourceRegion, "'resourceRegion' cannot be null");
    // Combine the service-issued authorization token with the caller-supplied target resource details.
    return service.generateModelCopyAuthorizationWithResponseAsync(context)
        .map(response -> {
            final CopyAuthorizationResult authorizationResult = response.getValue();
            final CopyAuthorization copyAuthorization = new CopyAuthorization(
                authorizationResult.getModelId(), authorizationResult.getAccessToken(), resourceId,
                resourceRegion, authorizationResult.getExpirationDateTimeTicks());
            return new SimpleResponse<>(response, copyAuthorization);
        });
}
private Mono<PagedResponse<CustomFormModelInfo>> listFirstPageModelInfo(Context context) {
    // Fetch the first page of model summaries and adapt the generated page type to the public one.
    return service.listCustomModelsSinglePageAsync(context)
        .doOnRequest(ignoredValue -> logger.info("Listing information for all models"))
        .doOnSuccess(response -> logger.info("Listed all models"))
        .doOnError(error -> logger.warning("Failed to list all models information", error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
private Mono<PagedResponse<CustomFormModelInfo>> listNextPageModelInfo(String nextPageLink, Context context) {
    // A null/empty continuation token means there are no further pages to fetch.
    if (CoreUtils.isNullOrEmpty(nextPageLink)) {
        return Mono.empty();
    }
    return service.listCustomModelsNextSinglePageAsync(nextPageLink, context)
        .doOnSubscribe(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink))
        .doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink))
        .doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink,
            error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
private Function<PollingContext<OperationResult>, Mono<CustomFormModelInfo>> fetchCopyModelResultOperation(
    String modelId, String copyModelId) {
    // Final-result fetcher for the copy poller: reads the finished copy operation and maps it to
    // the info of the newly copied model (identified by copyModelId on the target resource).
    return (pollingContext) -> {
        try {
            final UUID resultUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), resultUid)
                .map(modelSimpleResponse -> {
                    CopyOperationResult copyOperationResult = modelSimpleResponse.getValue();
                    // Raises HttpResponseException when the service reports a failed copy with errors.
                    throwIfCopyOperationStatusInvalid(copyOperationResult);
                    return new CustomFormModelInfo(copyModelId,
                        copyOperationResult.getStatus() == OperationStatus.SUCCEEDED
                            ? CustomFormModelStatus.READY
                            : CustomFormModelStatus.fromString(copyOperationResult.getStatus().toString()),
                        copyOperationResult.getCreatedDateTime(),
                        copyOperationResult.getLastUpdatedDateTime());
                });
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Poll operation of the copy-model poller: queries the service for the current copy status.
 *
 * @param modelId Source model identifier (UUID string) being copied.
 * @return Function producing the next {@link PollResponse} for the poller.
 */
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createCopyPollOperation(String modelId) {
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            UUID targetId = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), targetId)
                .flatMap(modelSimpleResponse ->
                    processCopyModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException ex) {
            // NOTE(review): only HttpResponseException is trapped here, so a malformed UUID
            // (IllegalArgumentException) propagates synchronously, whereas the training-side
            // poll operation converts its synchronous failure into a FAILED poll response —
            // confirm the asymmetry is intentional.
            return monoError(logger, ex);
        }
    };
}
/**
 * Activation operation of the copy-model poller: submits the copy request and captures the
 * operation id parsed from the returned operation-location header.
 *
 * @param modelId Source model identifier (UUID string) to copy.
 * @param target Copy authorization generated by the target Form Recognizer resource.
 * @return Function producing the activation {@link OperationResult} for the poller.
 */
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getCopyActivationOperation(
    String modelId, CopyAuthorization target) {
    return pollingContext -> {
        try {
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            Objects.requireNonNull(target, "'target' cannot be null.");
            // Rebuild the service-side authorization payload from the user-supplied target.
            CopyAuthorizationResult authorizationPayload = new CopyAuthorizationResult()
                .setModelId(target.getModelId())
                .setAccessToken(target.getAccessToken())
                .setExpirationDateTimeTicks(target.getExpiresOn());
            CopyRequest copyRequest = new CopyRequest()
                .setTargetResourceId(target.getResourceId())
                .setTargetResourceRegion(target.getResourceRegion())
                .setCopyAuthorization(authorizationPayload);
            return service.copyCustomModelWithResponseAsync(UUID.fromString(modelId), copyRequest)
                .map(response -> new OperationResult(
                    parseModelId(response.getDeserializedHeaders().getOperationLocation())));
        } catch (RuntimeException ex) {
            // Funnel synchronous failures (null args, malformed UUID) into the returned Mono.
            return monoError(logger, ex);
        }
    };
}
/**
 * Translates the service-reported copy status into the long-running-operation status model,
 * carrying the existing {@link OperationResult} forward into the next poll response.
 *
 * @param copyModel Response holding the service's copy operation result.
 * @param copyModelOperationResponse The latest poll response whose value is propagated.
 * @return The next {@link PollResponse} with the mapped status.
 */
private Mono<PollResponse<OperationResult>> processCopyModelResponse(
    SimpleResponse<CopyOperationResult> copyModel,
    PollResponse<OperationResult> copyModelOperationResponse) {
    final OperationStatus copyStatus = copyModel.getValue().getStatus();
    final LongRunningOperationStatus lroStatus;
    if (copyStatus == OperationStatus.NOT_STARTED || copyStatus == OperationStatus.RUNNING) {
        lroStatus = LongRunningOperationStatus.IN_PROGRESS;
    } else if (copyStatus == OperationStatus.SUCCEEDED) {
        lroStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    } else if (copyStatus == OperationStatus.FAILED) {
        lroStatus = LongRunningOperationStatus.FAILED;
    } else {
        // Unknown statuses are treated as terminal ("complete" = true) by name.
        lroStatus = LongRunningOperationStatus.fromString(copyStatus.toString(), true);
    }
    return Mono.just(new PollResponse<>(lroStatus, copyModelOperationResponse.getValue()));
}
/**
 * Fetch operation of the training poller: retrieves the trained model and converts it to the
 * public {@link CustomFormModel} shape once polling completes.
 *
 * @return Function producing the final {@link CustomFormModel} for the poller.
 */
private Function<PollingContext<OperationResult>, Mono<CustomFormModel>> fetchTrainingModelResultOperation() {
    return (pollingContext) -> {
        try {
            final UUID modelUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            // 'true' presumably asks the service to include extra key/label details in the
            // model payload — confirm against the service proxy's parameter name.
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .map(modelSimpleResponse -> {
                    // Throws when the service produced an INVALID model with error details.
                    throwIfModelStatusInvalid(modelSimpleResponse.getValue());
                    return toCustomFormModel(modelSimpleResponse.getValue());
                });
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Poll operation of the training poller: queries the service for the model's training status.
 *
 * @return Function producing the next {@link PollResponse} for the poller.
 */
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createTrainingPollOperation() {
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            UUID modelUid = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .flatMap(modelSimpleResponse ->
                    processTrainingModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException e) {
            // A synchronous service error ends polling: log it and report FAILED.
            // NOTE(review): the poll response carries a null value here — confirm downstream
            // consumers tolerate a FAILED response without an OperationResult.
            logger.logExceptionAsError(e);
            return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null));
        }
    };
}
/**
 * Activation operation of the training poller: submits the train request and captures the
 * result id parsed from the returned location header.
 *
 * @param trainingFilesUrl Externally accessible Azure storage blob container Uri.
 * @param includeSubFolders Whether documents in sub folders are included for training.
 * @param filePrefix Prefix filter applied to the documents in the source path, may be null.
 * @param useTrainingLabels Whether labeled files are used for training the model.
 * @return Function producing the activation {@link OperationResult} for the poller.
 */
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getTrainingActivationOperation(
    String trainingFilesUrl, boolean includeSubFolders, String filePrefix, boolean useTrainingLabels) {
    return pollingContext -> {
        try {
            Objects.requireNonNull(trainingFilesUrl, "'trainingFilesUrl' cannot be null.");
            TrainSourceFilter sourceFilter = new TrainSourceFilter()
                .setIncludeSubFolders(includeSubFolders)
                .setPrefix(filePrefix);
            TrainRequest trainRequest = new TrainRequest()
                .setSource(trainingFilesUrl)
                .setSourceFilter(sourceFilter)
                .setUseLabelFile(useTrainingLabels);
            return service.trainCustomModelAsyncWithResponseAsync(trainRequest)
                .map(response -> new OperationResult(
                    parseModelId(response.getDeserializedHeaders().getLocation())));
        } catch (RuntimeException ex) {
            // Funnel synchronous failures (e.g. null trainingFilesUrl) into the returned Mono.
            return monoError(logger, ex);
        }
    };
}
/**
 * Translates the model's training status into the long-running-operation status model,
 * carrying the existing {@link OperationResult} forward into the next poll response.
 *
 * @param trainingModel Response holding the service's model payload.
 * @param trainingModelOperationResponse The latest poll response whose value is propagated.
 * @return The next {@link PollResponse} with the mapped status.
 */
private static Mono<PollResponse<OperationResult>> processTrainingModelResponse(
    SimpleResponse<Model> trainingModel,
    PollResponse<OperationResult> trainingModelOperationResponse) {
    final LongRunningOperationStatus lroStatus;
    switch (trainingModel.getValue().getModelInfo().getStatus()) {
        case CREATING:
            lroStatus = LongRunningOperationStatus.IN_PROGRESS;
            break;
        case READY:
            lroStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            break;
        case INVALID:
            lroStatus = LongRunningOperationStatus.FAILED;
            break;
        default:
            // Unknown statuses are treated as terminal ("complete" = true) by name.
            lroStatus = LongRunningOperationStatus.fromString(
                trainingModel.getValue().getModelInfo().getStatus().toString(), true);
            break;
    }
    return Mono.just(new PollResponse<>(lroStatus, trainingModelOperationResponse.getValue()));
}
/**
 * Helper method that throws a {@link HttpResponseException} if the {@link CopyOperationResult}
 * has an {@link OperationStatus} of FAILED and the service supplied error details.
 *
 * @param copyResult The copy operation response returned from the service.
 * @throws HttpResponseException If the copy operation failed with reported errors.
 */
private void throwIfCopyOperationStatusInvalid(CopyOperationResult copyResult) {
    // Constant-first equals avoids an NPE if the status is absent; guard getCopyResult()
    // because a failed operation may arrive without a result payload.
    if (OperationStatus.FAILED.equals(copyResult.getStatus())
        && copyResult.getCopyResult() != null) {
        List<ErrorInformation> errorInformationList = copyResult.getCopyResult().getErrors();
        if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
            throw logger.logExceptionAsError(new HttpResponseException("Copy operation returned with a failed "
                + "status", null, errorInformationList));
        }
    }
}
/**
* Helper method that throws a {@link HttpResponseException} if {@link ModelInfo
* {@link com.azure.ai.formrecognizer.implementation.models.ModelStatus
*
* @param customModel The response returned from the service.
*/
} | class FormTrainingAsyncClient {
private final ClientLogger logger = new ClientLogger(FormTrainingAsyncClient.class);
private final FormRecognizerClientImpl service;
private final FormRecognizerServiceVersion serviceVersion;
/**
 * Create a {@link FormTrainingClient} that sends requests to the Form Recognizer service's endpoint.
 * Each service call goes through the pipeline configured by the {@link FormTrainingClientBuilder}.
 *
 * @param service The proxy service used to perform REST calls.
 * @param serviceVersion The versions of Azure Form Recognizer supported by this client library.
 */
FormTrainingAsyncClient(FormRecognizerClientImpl service, FormRecognizerServiceVersion serviceVersion) {
    this.service = service;
    this.serviceVersion = serviceVersion;
}
/**
* Creates a new {@link FormRecognizerAsyncClient} object. The new {@link FormTrainingAsyncClient}
* uses the same request policy pipeline as the {@link FormTrainingAsyncClient}.
*
* @return A new {@link FormRecognizerAsyncClient} object.
*/
public FormRecognizerAsyncClient getFormRecognizerAsyncClient() {
return new FormRecognizerClientBuilder().endpoint(getEndpoint()).pipeline(getHttpPipeline()).buildAsyncClient();
}
/**
* Gets the pipeline the client is using.
*
* @return the pipeline the client is using.
*/
HttpPipeline getHttpPipeline() {
return service.getHttpPipeline();
}
/**
* Gets the endpoint the client is using.
*
* @return the endpoint the client is using.
*/
String getEndpoint() {
return service.getEndpoint();
}
/**
* Create and train a custom model.
* Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
* Other type of content is ignored.
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl source URL parameter that is either an externally accessible Azure
* storage blob container Uri (preferably a Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
*
* @return A {@link PollerFlux} that polls the training model operation until it has completed, has failed, or has
* been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
boolean useTrainingLabels) {
return beginTraining(trainingFilesUrl, useTrainingLabels, null, null);
}
/**
* Create and train a custom model.
* <p>Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.Other type of content is ignored.
* </p>
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl an externally accessible Azure storage blob container Uri (preferably a
* Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
* @param trainingFileFilter Filter to apply to the documents in the source path for training.
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the extract receipt operation until it
* has completed, has failed, or has been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels, TrainingFileFilter trainingFileFilter, Duration pollInterval) {
    // Fall back to the service default poll interval when the caller supplied none.
    final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
    // Flatten the optional filter into the values the activation operation expects.
    final boolean includeSubFolders = trainingFileFilter != null && trainingFileFilter.isIncludeSubFolders();
    final String filePrefix = trainingFileFilter != null ? trainingFileFilter.getPrefix() : null;
    return new PollerFlux<>(
        interval,
        getTrainingActivationOperation(trainingFilesUrl, includeSubFolders, filePrefix, useTrainingLabels),
        createTrainingPollOperation(),
        // The service does not support cancelling the training operation.
        (activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchTrainingModelResultOperation());
}
/**
* Get detailed information for a specified custom model id.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModel
*
* @param modelId The UUID string format model identifier.
*
* @return The detailed information for the specified model.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CustomFormModel> getCustomModel(String modelId) {
return getCustomModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Get detailed information for a specified custom model id with Http response
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Response} containing the requested {@link CustomFormModel model}.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId) {
try {
return withContext(context -> getCustomModelWithResponse(modelId, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId, Context context) {
Objects.requireNonNull(modelId, "'modelId' cannot be null");
return service.getCustomModelWithResponseAsync(UUID.fromString(modelId), true, context)
.map(response -> new SimpleResponse<>(response, toCustomFormModel(response.getValue())));
}
/**
* Get account information for all custom models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountProperties}
*
* @return The account information.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AccountProperties> getAccountProperties() {
return getAccountPropertiesWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Get account information.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountPropertiesWithResponse}
*
* @return A {@link Response} containing the requested account information details.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AccountProperties>> getAccountPropertiesWithResponse() {
try {
return withContext(context -> getAccountPropertiesWithResponse(context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<AccountProperties>> getAccountPropertiesWithResponse(Context context) {
return service.getCustomModelsWithResponseAsync(context)
.map(response -> new SimpleResponse<>(response,
new AccountProperties(response.getValue().getSummary().getCount(),
response.getValue().getSummary().getLimit())));
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModel
*
* @param modelId The UUID string format model identifier.
*
* @return An empty Mono.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteModel(String modelId) {
return deleteModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Mono} containing containing status code and HTTP headers
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteModelWithResponse(String modelId) {
try {
return withContext(context -> deleteModelWithResponse(modelId, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context) {
Objects.requireNonNull(modelId, "'modelId' cannot be null");
return service.deleteCustomModelWithResponseAsync(UUID.fromString(modelId), context)
.map(response -> new SimpleResponse<>(response, null));
}
/**
* List information for all models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.listCustomModels}
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<CustomFormModelInfo> listCustomModels() {
    try {
        // withContext captures the subscriber context before delegating to the page fetchers;
        // the first-page fetch is a direct method reference, the next-page fetch closes over
        // the continuation token.
        return new PagedFlux<>(() -> withContext(this::listFirstPageModelInfo),
            continuationToken ->
                withContext(context -> listNextPageModelInfo(continuationToken, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous construction failures as a failing paged stream.
        return new PagedFlux<>(() -> monoError(logger, ex));
    }
}
/**
* List information for all models with taking {@link Context}.
*
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
PagedFlux<CustomFormModelInfo> listCustomModels(Context context) {
return new PagedFlux<>(() -> listFirstPageModelInfo(context),
continuationToken -> listNextPageModelInfo(continuationToken, context));
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
CopyAuthorization target) {
return beginCopyModel(modelId, target, null);
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target, Duration pollInterval) {
    // Fall back to the service default poll interval when the caller supplied none.
    final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
    return new PollerFlux<>(
        interval,
        getCopyActivationOperation(modelId, target),
        createCopyPollOperation(modelId),
        // The service does not support cancelling the copy operation.
        (activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchCopyModelResultOperation(modelId, target.getModelId()));
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported
* by Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorization
*
* @return The {@link CopyAuthorization} that could be used to authorize copying model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CopyAuthorization> getCopyAuthorization(String resourceId, String resourceRegion) {
return getCopyAuthorizationWithResponse(resourceId, resourceRegion).flatMap(FluxUtil::toMono);
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
* This should be called by the target resource (where the model will be copied to) and the output can be passed as
* the target parameter into {@link FormTrainingAsyncClient
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported by
* Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorizationWithResponse
*
* @return A {@link Response} containing the {@link CopyAuthorization} that could be used to authorize copying
* model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId,
String resourceRegion) {
try {
return withContext(context -> getCopyAuthorizationWithResponse(resourceId, resourceRegion, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
 * Service-call implementation for generating a copy authorization for the target resource.
 *
 * @param resourceId Azure Resource Id of the target Form Recognizer resource.
 * @param resourceRegion Location of the target Form Recognizer resource.
 * @param context Additional context passed through the HTTP pipeline during the service call.
 * @return A {@link Response} wrapping the generated {@link CopyAuthorization}.
 */
Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId, String resourceRegion,
    Context context) {
    Objects.requireNonNull(resourceId, "'resourceId' cannot be null");
    Objects.requireNonNull(resourceRegion, "'resourceRegion' cannot be null");
    return service.generateModelCopyAuthorizationWithResponseAsync(context)
        .map(response -> {
            CopyAuthorizationResult copyAuthorizationResult = response.getValue();
            // Fold the caller-supplied resource id/region into the public model so the source
            // resource can later address the copy target without extra lookups.
            return new SimpleResponse<>(response, new CopyAuthorization(copyAuthorizationResult.getModelId(),
                copyAuthorizationResult.getAccessToken(), resourceId, resourceRegion,
                copyAuthorizationResult.getExpirationDateTimeTicks()));
        });
}
/**
 * Fetches the first page of custom model listings.
 *
 * @param context Additional context passed through the HTTP pipeline during the service call.
 * @return The first page of {@link CustomFormModelInfo}.
 */
private Mono<PagedResponse<CustomFormModelInfo>> listFirstPageModelInfo(Context context) {
    return service.listCustomModelsSinglePageAsync(context)
        // NOTE(review): this uses doOnRequest while listNextPageModelInfo uses doOnSubscribe;
        // confirm the differing logging hooks are intentional.
        .doOnRequest(ignoredValue -> logger.info("Listing information for all models"))
        .doOnSuccess(response -> logger.info("Listed all models"))
        .doOnError(error -> logger.warning("Failed to list all models information", error))
        // Re-wrap the service page so the public element type is CustomFormModelInfo.
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
private Mono<PagedResponse<CustomFormModelInfo>> listNextPageModelInfo(String nextPageLink, Context context) {
if (CoreUtils.isNullOrEmpty(nextPageLink)) {
return Mono.empty();
}
return service.listCustomModelsNextSinglePageAsync(nextPageLink, context)
.doOnSubscribe(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink))
.doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink))
.doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink,
error))
.map(res -> new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
toCustomFormModelInfo(res.getValue()),
res.getContinuationToken(),
null));
}
private Function<PollingContext<OperationResult>, Mono<CustomFormModelInfo>> fetchCopyModelResultOperation(
String modelId, String copyModelId) {
return (pollingContext) -> {
try {
final UUID resultUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
Objects.requireNonNull(modelId, "'modelId' cannot be null.");
return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), resultUid)
.map(modelSimpleResponse -> {
CopyOperationResult copyOperationResult = modelSimpleResponse.getValue();
throwIfCopyOperationStatusInvalid(copyOperationResult);
return new CustomFormModelInfo(copyModelId,
copyOperationResult.getStatus() == OperationStatus.SUCCEEDED
? CustomFormModelStatus.READY
: CustomFormModelStatus.fromString(copyOperationResult.getStatus().toString()),
copyOperationResult.getCreatedDateTime(),
copyOperationResult.getLastUpdatedDateTime());
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
createCopyPollOperation(String modelId) {
return (pollingContext) -> {
try {
PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
UUID targetId = UUID.fromString(operationResultPollResponse.getValue().getResultId());
return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), targetId)
.flatMap(modelSimpleResponse ->
processCopyModelResponse(modelSimpleResponse, operationResultPollResponse));
} catch (HttpResponseException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getCopyActivationOperation(
String modelId, CopyAuthorization target) {
return (pollingContext) -> {
try {
Objects.requireNonNull(modelId, "'modelId' cannot be null.");
Objects.requireNonNull(target, "'target' cannot be null.");
CopyRequest copyRequest = new CopyRequest()
.setTargetResourceId(target.getResourceId())
.setTargetResourceRegion(target.getResourceRegion())
.setCopyAuthorization(new CopyAuthorizationResult()
.setModelId(target.getModelId())
.setAccessToken(target.getAccessToken())
.setExpirationDateTimeTicks(target.getExpiresOn()));
return service.copyCustomModelWithResponseAsync(UUID.fromString(modelId), copyRequest)
.map(response ->
new OperationResult(parseModelId(response.getDeserializedHeaders().getOperationLocation())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Mono<PollResponse<OperationResult>> processCopyModelResponse(
SimpleResponse<CopyOperationResult> copyModel,
PollResponse<OperationResult> copyModelOperationResponse) {
LongRunningOperationStatus status;
switch (copyModel.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case FAILED:
status = LongRunningOperationStatus.FAILED;
break;
default:
status = LongRunningOperationStatus.fromString(copyModel.getValue().getStatus().toString(), true);
break;
}
return Mono.just(new PollResponse<>(status, copyModelOperationResponse.getValue()));
}
private Function<PollingContext<OperationResult>, Mono<CustomFormModel>> fetchTrainingModelResultOperation() {
return (pollingContext) -> {
try {
final UUID modelUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
return service.getCustomModelWithResponseAsync(modelUid, true)
.map(modelSimpleResponse -> {
throwIfModelStatusInvalid(modelSimpleResponse.getValue());
return toCustomFormModel(modelSimpleResponse.getValue());
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
createTrainingPollOperation() {
return (pollingContext) -> {
try {
PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
UUID modelUid = UUID.fromString(operationResultPollResponse.getValue().getResultId());
return service.getCustomModelWithResponseAsync(modelUid, true)
.flatMap(modelSimpleResponse ->
processTrainingModelResponse(modelSimpleResponse, operationResultPollResponse));
} catch (HttpResponseException e) {
logger.logExceptionAsError(e);
return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null));
}
};
}
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getTrainingActivationOperation(
String trainingFilesUrl, boolean includeSubFolders, String filePrefix, boolean useTrainingLabels) {
return (pollingContext) -> {
try {
Objects.requireNonNull(trainingFilesUrl, "'trainingFilesUrl' cannot be null.");
TrainSourceFilter trainSourceFilter = new TrainSourceFilter().setIncludeSubFolders(includeSubFolders)
.setPrefix(filePrefix);
TrainRequest serviceTrainRequest = new TrainRequest().setSource(trainingFilesUrl).
setSourceFilter(trainSourceFilter).setUseLabelFile(useTrainingLabels);
return service.trainCustomModelAsyncWithResponseAsync(serviceTrainRequest)
.map(response ->
new OperationResult(parseModelId(response.getDeserializedHeaders().getLocation())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private static Mono<PollResponse<OperationResult>> processTrainingModelResponse(
SimpleResponse<Model> trainingModel,
PollResponse<OperationResult> trainingModelOperationResponse) {
LongRunningOperationStatus status;
switch (trainingModel.getValue().getModelInfo().getStatus()) {
case CREATING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case READY:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case INVALID:
status = LongRunningOperationStatus.FAILED;
break;
default:
status = LongRunningOperationStatus.fromString(
trainingModel.getValue().getModelInfo().getStatus().toString(), true);
break;
}
return Mono.just(new PollResponse<>(status, trainingModelOperationResponse.getValue()));
}
/**
* Helper method that throws a {@link HttpResponseException} if {@link CopyOperationResult
* {@link OperationStatus
*
* @param copyResult The copy operation response returned from the service.
*/
private void throwIfCopyOperationStatusInvalid(CopyOperationResult copyResult) {
if (copyResult.getStatus().equals(OperationStatus.FAILED)) {
List<ErrorInformation> errorInformationList = copyResult.getCopyResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException("Copy operation returned with a failed "
+ "status", null, errorInformationList));
}
}
}
/**
* Helper method that throws a {@link HttpResponseException} if {@link ModelInfo
* {@link com.azure.ai.formrecognizer.implementation.models.ModelStatus
*
* @param customModel The response returned from the service.
*/
} |
https://github.com/Azure/azure-sdk-for-java/pull/11720 | private void throwIfModelStatusInvalid(Model customModel) {
if (ModelStatus.INVALID.equals(customModel.getModelInfo().getStatus())) {
List<ErrorInformation> errorInformationList = customModel.getTrainResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException(
String.format("Invalid model created with ID: %s", customModel.getModelInfo().getModelId()),
null, errorInformationList));
}
}
} | throw logger.logExceptionAsError(new HttpResponseException( | private void throwIfModelStatusInvalid(Model customModel) {
if (ModelStatus.INVALID.equals(customModel.getModelInfo().getStatus())) {
List<ErrorInformation> errorInformationList = customModel.getTrainResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException(
String.format("Invalid model created with ID: %s", customModel.getModelInfo().getModelId()),
null, errorInformationList));
}
}
} | class FormTrainingAsyncClient {
private final ClientLogger logger = new ClientLogger(FormTrainingAsyncClient.class);
private final FormRecognizerClientImpl service;
private final FormRecognizerServiceVersion serviceVersion;
/**
* Create a {@link FormTrainingClient} that sends requests to the Form Recognizer service's endpoint.
* Each service call goes through the {@link FormTrainingClientBuilder
*
* @param service The proxy service used to perform REST calls.
* @param serviceVersion The versions of Azure Form Recognizer supported by this client library.
*/
FormTrainingAsyncClient(FormRecognizerClientImpl service, FormRecognizerServiceVersion serviceVersion) {
this.service = service;
this.serviceVersion = serviceVersion;
}
/**
* Creates a new {@link FormRecognizerAsyncClient} object. The new {@link FormTrainingAsyncClient}
* uses the same request policy pipeline as the {@link FormTrainingAsyncClient}.
*
* @return A new {@link FormRecognizerAsyncClient} object.
*/
public FormRecognizerAsyncClient getFormRecognizerAsyncClient() {
    // Reuse this client's endpoint and HTTP pipeline so both clients share the same request policies.
    FormRecognizerClientBuilder builder = new FormRecognizerClientBuilder()
        .endpoint(getEndpoint())
        .pipeline(getHttpPipeline());
    return builder.buildAsyncClient();
}
/**
* Gets the pipeline the client is using.
*
* @return the pipeline the client is using.
*/
HttpPipeline getHttpPipeline() {
    // The generated service implementation owns the pipeline; simply expose it.
    final HttpPipeline pipeline = service.getHttpPipeline();
    return pipeline;
}
/**
* Gets the endpoint the client is using.
*
* @return the endpoint the client is using.
*/
String getEndpoint() {
    // The endpoint is configured on the generated service implementation at build time.
    final String endpoint = service.getEndpoint();
    return endpoint;
}
/**
* Create and train a custom model.
* Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
* Other type of content is ignored.
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl source URL parameter that is either an externally accessible Azure
* storage blob container Uri (preferably a Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
*
* @return A {@link PollerFlux} that polls the training model operation until it has completed, has failed, or has
* been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels) {
    // Convenience overload: no training file filter and the default poll interval.
    return beginTraining(trainingFilesUrl, useTrainingLabels, null, null);
}
/**
* Create and train a custom model.
* <p>Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.Other type of content is ignored.
* </p>
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl an externally accessible Azure storage blob container Uri (preferably a
* Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
* @param trainingFileFilter Filter to apply to the documents in the source path for training.
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the extract receipt operation until it
* has completed, has failed, or has been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
    boolean useTrainingLabels, TrainingFileFilter trainingFileFilter, Duration pollInterval) {
    // Fall back to the service default when the caller does not supply a poll interval.
    final Duration interval = pollInterval == null ? DEFAULT_DURATION : pollInterval;
    // Unpack the optional filter once instead of repeating inline null checks.
    final boolean includeSubFolders = trainingFileFilter != null && trainingFileFilter.isIncludeSubFolders();
    final String prefix = trainingFileFilter == null ? null : trainingFileFilter.getPrefix();
    // The poller: kick off training, poll the model status, no cancellation, then fetch the final model.
    return new PollerFlux<OperationResult, CustomFormModel>(
        interval,
        getTrainingActivationOperation(trainingFilesUrl, includeSubFolders, prefix, useTrainingLabels),
        createTrainingPollOperation(),
        (activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchTrainingModelResultOperation());
}
/**
* Get detailed information for a specified custom model id.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModel
*
* @param modelId The UUID string format model identifier.
*
* @return The detailed information for the specified model.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CustomFormModel> getCustomModel(String modelId) {
    // Unwrap the Response<CustomFormModel> down to just the model value.
    return getCustomModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Get detailed information for a specified custom model id with Http response
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Response} containing the requested {@link CustomFormModel model}.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId) {
    try {
        // withContext propagates the Reactor subscriber context into the service call.
        return withContext(context -> getCustomModelWithResponse(modelId, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
 * Fetches the custom model for the given id and converts it to the public {@link CustomFormModel}.
 *
 * @param modelId The UUID string format model identifier; must not be {@code null}.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A {@link Response} wrapping the converted {@link CustomFormModel}.
 */
Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId, Context context) {
    Objects.requireNonNull(modelId, "'modelId' cannot be null");
    // NOTE(review): the 'true' flag presumably requests extended model details (e.g. include keys) —
    // confirm against the generated FormRecognizerClientImpl signature.
    return service.getCustomModelWithResponseAsync(UUID.fromString(modelId), true, context)
        .map(response -> new SimpleResponse<>(response, toCustomFormModel(response.getValue())));
}
/**
* Get account information for all custom models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountProperties}
*
* @return The account information.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AccountProperties> getAccountProperties() {
    // Unwrap the Response down to just the AccountProperties value.
    return getAccountPropertiesWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Get account information.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountPropertiesWithResponse}
*
* @return A {@link Response} containing the requested account information details.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AccountProperties>> getAccountPropertiesWithResponse() {
    try {
        // withContext propagates the Reactor subscriber context into the service call.
        return withContext(context -> getAccountPropertiesWithResponse(context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
 * Retrieves the account-wide custom model summary (current model count and limit).
 *
 * @param context Additional context passed through the HTTP pipeline.
 * @return A {@link Response} wrapping the {@link AccountProperties}.
 */
Mono<Response<AccountProperties>> getAccountPropertiesWithResponse(Context context) {
    return service.getCustomModelsWithResponseAsync(context)
        .map(response -> new SimpleResponse<>(response,
            new AccountProperties(response.getValue().getSummary().getCount(),
                response.getValue().getSummary().getLimit())));
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModel
*
* @param modelId The UUID string format model identifier.
*
* @return An empty Mono.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteModel(String modelId) {
    // Discard the Response wrapper; callers only observe completion or error.
    return deleteModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Mono} containing containing status code and HTTP headers
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteModelWithResponse(String modelId) {
    try {
        // withContext propagates the Reactor subscriber context into the service call.
        return withContext(context -> deleteModelWithResponse(modelId, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
 * Deletes the model and maps the service response to a body-less {@link Response}.
 *
 * @param modelId The UUID string format model identifier; must not be {@code null}.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A {@link Response} carrying only the status code and headers.
 */
Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context) {
    Objects.requireNonNull(modelId, "'modelId' cannot be null");
    return service.deleteCustomModelWithResponseAsync(UUID.fromString(modelId), context)
        .map(response -> new SimpleResponse<>(response, null));
}
/**
* List information for all models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.listCustomModels}
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<CustomFormModelInfo> listCustomModels() {
    try {
        // First-page retrieval needs only the subscriber Context, so a method reference replaces the
        // redundant `context -> listFirstPageModelInfo(context)` lambda; the next-page retriever also
        // needs the continuation token, so it stays a lambda.
        return new PagedFlux<>(() -> withContext(this::listFirstPageModelInfo),
            continuationToken -> withContext(context -> listNextPageModelInfo(continuationToken, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned PagedFlux instead of throwing.
        return new PagedFlux<>(() -> monoError(logger, ex));
    }
}
/**
* List information for all models with taking {@link Context}.
*
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
PagedFlux<CustomFormModelInfo> listCustomModels(Context context) {
    // Same paging wiring as the public overload, but with an explicit caller-supplied Context.
    return new PagedFlux<>(() -> listFirstPageModelInfo(context),
        continuationToken -> listNextPageModelInfo(continuationToken, context));
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target) {
    // Convenience overload using the default poll interval.
    return beginCopyModel(modelId, target, null);
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
    CopyAuthorization target, Duration pollInterval) {
    // Use the 5-second service default unless the caller overrides the interval.
    final Duration interval = pollInterval == null ? DEFAULT_DURATION : pollInterval;
    // The poller: start the server-side copy, poll its status, no cancellation support,
    // then build the final CustomFormModelInfo for the copied model.
    return new PollerFlux<OperationResult, CustomFormModelInfo>(
        interval,
        getCopyActivationOperation(modelId, target),
        createCopyPollOperation(modelId),
        (activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
        fetchCopyModelResultOperation(modelId, target.getModelId()));
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported
* by Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorization
*
* @return The {@link CopyAuthorization} that could be used to authorize copying model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CopyAuthorization> getCopyAuthorization(String resourceId, String resourceRegion) {
    // Unwrap the Response down to just the CopyAuthorization value.
    return getCopyAuthorizationWithResponse(resourceId, resourceRegion).flatMap(FluxUtil::toMono);
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
* This should be called by the target resource (where the model will be copied to) and the output can be passed as
* the target parameter into {@link FormTrainingAsyncClient
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported by
* Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorizationWithResponse
*
* @return A {@link Response} containing the {@link CopyAuthorization} that could be used to authorize copying
* model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId,
    String resourceRegion) {
    try {
        // withContext propagates the Reactor subscriber context into the service call.
        return withContext(context -> getCopyAuthorizationWithResponse(resourceId, resourceRegion, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
 * Calls the service to generate a copy authorization token and combines it with the
 * caller-supplied target resource id/region into a {@link CopyAuthorization}.
 *
 * @param resourceId Azure Resource Id of the target resource; must not be {@code null}.
 * @param resourceRegion Region of the target resource; must not be {@code null}.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A {@link Response} wrapping the assembled {@link CopyAuthorization}.
 */
Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId, String resourceRegion,
    Context context) {
    Objects.requireNonNull(resourceId, "'resourceId' cannot be null");
    Objects.requireNonNull(resourceRegion, "'resourceRegion' cannot be null");
    return service.generateModelCopyAuthorizationWithResponseAsync(context)
        .map(response -> {
            CopyAuthorizationResult copyAuthorizationResult = response.getValue();
            // resourceId/resourceRegion come from the caller; the rest comes from the service result.
            return new SimpleResponse<>(response, new CopyAuthorization(copyAuthorizationResult.getModelId(),
                copyAuthorizationResult.getAccessToken(), resourceId, resourceRegion,
                copyAuthorizationResult.getExpirationDateTimeTicks()));
        });
}
/**
 * Fetches the first page of model infos, with request/success/error logging, and converts
 * the generated page type into the public {@link CustomFormModelInfo} page.
 *
 * @param context Additional context passed through the HTTP pipeline.
 * @return The first {@link PagedResponse} of {@link CustomFormModelInfo}.
 */
private Mono<PagedResponse<CustomFormModelInfo>> listFirstPageModelInfo(Context context) {
    return service.listCustomModelsSinglePageAsync(context)
        .doOnRequest(ignoredValue -> logger.info("Listing information for all models"))
        .doOnSuccess(response -> logger.info("Listed all models"))
        .doOnError(error -> logger.warning("Failed to list all models information", error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
/**
 * Fetches a subsequent page of model infos identified by the continuation link.
 *
 * @param nextPageLink Continuation link from the previous page; null/empty ends pagination.
 * @param context Additional context passed through the HTTP pipeline.
 * @return The next {@link PagedResponse}, or an empty Mono when there are no more pages.
 */
private Mono<PagedResponse<CustomFormModelInfo>> listNextPageModelInfo(String nextPageLink, Context context) {
    // An absent continuation link means the previous page was the last one.
    if (CoreUtils.isNullOrEmpty(nextPageLink)) {
        return Mono.empty();
    }
    return service.listCustomModelsNextSinglePageAsync(nextPageLink, context)
        .doOnSubscribe(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink))
        .doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink))
        .doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink,
            error))
        .map(res -> new PagedResponseBase<>(
            res.getRequest(),
            res.getStatusCode(),
            res.getHeaders(),
            toCustomFormModelInfo(res.getValue()),
            res.getContinuationToken(),
            null));
}
/**
 * Final-result fetcher for the copy poller: reads the copy result for the source model and,
 * if it did not fail, converts it into a {@link CustomFormModelInfo} describing the copied model.
 *
 * @param modelId Source model id (owner of the copy operation); must not be {@code null}.
 * @param copyModelId Model id assigned to the copy on the target resource.
 * @return Function invoked by the poller once polling completes.
 */
private Function<PollingContext<OperationResult>, Mono<CustomFormModelInfo>> fetchCopyModelResultOperation(
    String modelId, String copyModelId) {
    return (pollingContext) -> {
        try {
            // The result id captured during activation identifies the copy operation to query.
            final UUID resultUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), resultUid)
                .map(modelSimpleResponse -> {
                    CopyOperationResult copyOperationResult = modelSimpleResponse.getValue();
                    // Failed copies with error details surface as HttpResponseException.
                    throwIfCopyOperationStatusInvalid(copyOperationResult);
                    // SUCCEEDED maps to READY; any other status string is carried through verbatim.
                    return new CustomFormModelInfo(copyModelId,
                        copyOperationResult.getStatus() == OperationStatus.SUCCEEDED
                            ? CustomFormModelStatus.READY
                            : CustomFormModelStatus.fromString(copyOperationResult.getStatus().toString()),
                        copyOperationResult.getCreatedDateTime(),
                        copyOperationResult.getLastUpdatedDateTime());
                });
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    };
}
/**
 * Poll step for the copy poller: re-queries the copy operation status for the source model
 * and translates it into a {@link PollResponse}.
 *
 * @param modelId Source model id that owns the copy operation.
 * @return Function invoked by the poller on every poll tick.
 */
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createCopyPollOperation(String modelId) {
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            // The operation id captured during activation identifies the copy operation to query.
            UUID targetId = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), targetId)
                .flatMap(modelSimpleResponse ->
                    processCopyModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException ex) {
            // Surface synchronous service failures through the returned Mono.
            return monoError(logger, ex);
        }
    };
}
/**
 * Activation step for the copy poller: validates inputs, builds the {@link CopyRequest} from the
 * target {@link CopyAuthorization}, starts the server-side copy, and extracts the operation id
 * from the Operation-Location response header.
 *
 * @param modelId Source model id to copy; must not be {@code null}.
 * @param target Copy authorization generated by the target resource; must not be {@code null}.
 * @return Function invoked once when the poller starts.
 */
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getCopyActivationOperation(
    String modelId, CopyAuthorization target) {
    return (pollingContext) -> {
        try {
            Objects.requireNonNull(modelId, "'modelId' cannot be null.");
            Objects.requireNonNull(target, "'target' cannot be null.");
            CopyRequest copyRequest = new CopyRequest()
                .setTargetResourceId(target.getResourceId())
                .setTargetResourceRegion(target.getResourceRegion())
                .setCopyAuthorization(new CopyAuthorizationResult()
                    .setModelId(target.getModelId())
                    .setAccessToken(target.getAccessToken())
                    .setExpirationDateTimeTicks(target.getExpiresOn()));
            return service.copyCustomModelWithResponseAsync(UUID.fromString(modelId), copyRequest)
                .map(response ->
                    // The copy operation id lives in the Operation-Location header.
                    new OperationResult(parseModelId(response.getDeserializedHeaders().getOperationLocation())));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    };
}
/**
 * Maps the service-side copy operation status onto the long-running-operation status reported
 * to the poller: NOT_STARTED/RUNNING -> IN_PROGRESS, SUCCEEDED -> SUCCESSFULLY_COMPLETED,
 * FAILED -> FAILED; any other status string is passed through via
 * {@link LongRunningOperationStatus#fromString(String, boolean)}.
 *
 * <p>Made {@code static} for consistency with {@link #processTrainingModelResponse} — the method
 * uses no instance state.</p>
 *
 * @param copyModel The latest copy-operation response from the service.
 * @param copyModelOperationResponse The previous poll response whose value (operation id) is carried forward.
 * @return A poll response with the translated status.
 */
private static Mono<PollResponse<OperationResult>> processCopyModelResponse(
    SimpleResponse<CopyOperationResult> copyModel,
    PollResponse<OperationResult> copyModelOperationResponse) {
    LongRunningOperationStatus status;
    switch (copyModel.getValue().getStatus()) {
        case NOT_STARTED:
        case RUNNING:
            status = LongRunningOperationStatus.IN_PROGRESS;
            break;
        case SUCCEEDED:
            status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            break;
        case FAILED:
            status = LongRunningOperationStatus.FAILED;
            break;
        default:
            // Unknown status strings are preserved verbatim.
            status = LongRunningOperationStatus.fromString(copyModel.getValue().getStatus().toString(), true);
            break;
    }
    return Mono.just(new PollResponse<>(status, copyModelOperationResponse.getValue()));
}
/**
 * Final-result fetcher for the training poller: retrieves the trained model, throws if the
 * service marked it INVALID with error details, and converts it to the public type.
 *
 * @return Function invoked by the poller once polling completes.
 */
private Function<PollingContext<OperationResult>, Mono<CustomFormModel>> fetchTrainingModelResultOperation() {
    return (pollingContext) -> {
        try {
            // The result id captured during activation is the new model's id.
            final UUID modelUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .map(modelSimpleResponse -> {
                    throwIfModelStatusInvalid(modelSimpleResponse.getValue());
                    return toCustomFormModel(modelSimpleResponse.getValue());
                });
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    };
}
/**
 * Poll step for the training poller: re-queries the model status and translates it into a
 * {@link PollResponse}.
 *
 * @return Function invoked by the poller on every poll tick.
 */
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
    createTrainingPollOperation() {
    return (pollingContext) -> {
        try {
            PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
            UUID modelUid = UUID.fromString(operationResultPollResponse.getValue().getResultId());
            return service.getCustomModelWithResponseAsync(modelUid, true)
                .flatMap(modelSimpleResponse ->
                    processTrainingModelResponse(modelSimpleResponse, operationResultPollResponse));
        } catch (HttpResponseException e) {
            // NOTE(review): unlike createCopyPollOperation (which propagates via monoError), a service
            // error here is logged and the poll is marked FAILED with a null value — confirm this
            // asymmetry is intentional.
            logger.logExceptionAsError(e);
            return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null));
        }
    };
}
/**
 * Activation step for the training poller: validates the source URL, builds the
 * {@link TrainRequest}, starts training, and extracts the new model id from the Location header.
 *
 * @param trainingFilesUrl Externally accessible training-container URL; must not be {@code null}.
 * @param includeSubFolders Whether sub-folders of the source path are included.
 * @param filePrefix Prefix filter applied to documents in the source path (may be {@code null}).
 * @param useTrainingLabels Whether labeled files are used for training.
 * @return Function invoked once when the poller starts.
 */
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getTrainingActivationOperation(
    String trainingFilesUrl, boolean includeSubFolders, String filePrefix, boolean useTrainingLabels) {
    return (pollingContext) -> {
        try {
            Objects.requireNonNull(trainingFilesUrl, "'trainingFilesUrl' cannot be null.");
            TrainSourceFilter trainSourceFilter = new TrainSourceFilter().setIncludeSubFolders(includeSubFolders)
                .setPrefix(filePrefix);
            TrainRequest serviceTrainRequest = new TrainRequest().setSource(trainingFilesUrl).
                setSourceFilter(trainSourceFilter).setUseLabelFile(useTrainingLabels);
            return service.trainCustomModelAsyncWithResponseAsync(serviceTrainRequest)
                .map(response ->
                    // NOTE(review): training reads the Location header while the copy activation reads
                    // Operation-Location — confirm both header choices against the service contract.
                    new OperationResult(parseModelId(response.getDeserializedHeaders().getLocation())));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing.
            return monoError(logger, ex);
        }
    };
}
/**
 * Maps the trained model's status onto the long-running-operation status reported to the poller:
 * CREATING -> IN_PROGRESS, READY -> SUCCESSFULLY_COMPLETED, INVALID -> FAILED; any other status
 * string is passed through verbatim.
 *
 * @param trainingModel The latest model response from the service.
 * @param trainingModelOperationResponse The previous poll response whose value is carried forward.
 * @return A poll response with the translated status.
 */
private static Mono<PollResponse<OperationResult>> processTrainingModelResponse(
    SimpleResponse<Model> trainingModel,
    PollResponse<OperationResult> trainingModelOperationResponse) {
    LongRunningOperationStatus status;
    switch (trainingModel.getValue().getModelInfo().getStatus()) {
        case CREATING:
            status = LongRunningOperationStatus.IN_PROGRESS;
            break;
        case READY:
            status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
            break;
        case INVALID:
            status = LongRunningOperationStatus.FAILED;
            break;
        default:
            // Unknown status strings are preserved verbatim.
            status = LongRunningOperationStatus.fromString(
                trainingModel.getValue().getModelInfo().getStatus().toString(), true);
            break;
    }
    return Mono.just(new PollResponse<>(status, trainingModelOperationResponse.getValue()));
}
/**
 * Helper method that throws an {@link HttpResponseException} if the {@link CopyOperationResult}
 * reported an {@link OperationStatus#FAILED} status with error details.
 *
 * @param copyResult The copy operation response returned from the service.
 */
private void throwIfCopyOperationStatusInvalid(CopyOperationResult copyResult) {
    // Constant-first equals is null-safe and matches the style used by throwIfModelStatusInvalid
    // (which writes ModelStatus.INVALID.equals(...)).
    if (OperationStatus.FAILED.equals(copyResult.getStatus())) {
        List<ErrorInformation> errorInformationList = copyResult.getCopyResult().getErrors();
        // Only throw when the service actually supplied error details.
        if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
            throw logger.logExceptionAsError(new HttpResponseException("Copy operation returned with a failed "
                + "status", null, errorInformationList));
        }
    }
}
/**
 * Helper method that throws an {@link HttpResponseException} if the {@link ModelInfo} status is
 * {@link com.azure.ai.formrecognizer.implementation.models.ModelStatus#INVALID}.
 *
 * @param customModel The response returned from the service.
 */
} | class FormTrainingAsyncClient {
private final ClientLogger logger = new ClientLogger(FormTrainingAsyncClient.class);
private final FormRecognizerClientImpl service;
private final FormRecognizerServiceVersion serviceVersion;
/**
* Create a {@link FormTrainingClient} that sends requests to the Form Recognizer service's endpoint.
* Each service call goes through the {@link FormTrainingClientBuilder
*
* @param service The proxy service used to perform REST calls.
* @param serviceVersion The versions of Azure Form Recognizer supported by this client library.
*/
FormTrainingAsyncClient(FormRecognizerClientImpl service, FormRecognizerServiceVersion serviceVersion) {
this.service = service;
this.serviceVersion = serviceVersion;
}
/**
* Creates a new {@link FormRecognizerAsyncClient} object. The new {@link FormTrainingAsyncClient}
* uses the same request policy pipeline as the {@link FormTrainingAsyncClient}.
*
* @return A new {@link FormRecognizerAsyncClient} object.
*/
public FormRecognizerAsyncClient getFormRecognizerAsyncClient() {
return new FormRecognizerClientBuilder().endpoint(getEndpoint()).pipeline(getHttpPipeline()).buildAsyncClient();
}
/**
* Gets the pipeline the client is using.
*
* @return the pipeline the client is using.
*/
HttpPipeline getHttpPipeline() {
return service.getHttpPipeline();
}
/**
* Gets the endpoint the client is using.
*
* @return the endpoint the client is using.
*/
String getEndpoint() {
return service.getEndpoint();
}
/**
* Create and train a custom model.
* Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.
* Other type of content is ignored.
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl source URL parameter that is either an externally accessible Azure
* storage blob container Uri (preferably a Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
*
* @return A {@link PollerFlux} that polls the training model operation until it has completed, has failed, or has
* been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
boolean useTrainingLabels) {
return beginTraining(trainingFilesUrl, useTrainingLabels, null, null);
}
/**
* Create and train a custom model.
* <p>Models are trained using documents that are of the following content type -
* 'application/pdf', 'image/jpeg', 'image/png', 'image/tiff'.Other type of content is ignored.
* </p>
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginTraining
*
* @param trainingFilesUrl an externally accessible Azure storage blob container Uri (preferably a
* Shared Access Signature Uri).
* @param useTrainingLabels boolean to specify the use of labeled files for training the model.
* @param trainingFileFilter Filter to apply to the documents in the source path for training.
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the extract receipt operation until it
* has completed, has failed, or has been cancelled. The completed operation returns a {@link CustomFormModel}.
* @throws HttpResponseException If training fails and model with {@link ModelStatus
* @throws NullPointerException If {@code trainingFilesUrl} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModel> beginTraining(String trainingFilesUrl,
boolean useTrainingLabels, TrainingFileFilter trainingFileFilter, Duration pollInterval) {
final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
return new PollerFlux<OperationResult, CustomFormModel>(
interval,
getTrainingActivationOperation(trainingFilesUrl,
trainingFileFilter != null ? trainingFileFilter.isIncludeSubFolders() : false,
trainingFileFilter != null ? trainingFileFilter.getPrefix() : null,
useTrainingLabels),
createTrainingPollOperation(),
(activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
fetchTrainingModelResultOperation());
}
/**
* Get detailed information for a specified custom model id.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModel
*
* @param modelId The UUID string format model identifier.
*
* @return The detailed information for the specified model.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CustomFormModel> getCustomModel(String modelId) {
return getCustomModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Get detailed information for a specified custom model id with Http response
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCustomModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Response} containing the requested {@link CustomFormModel model}.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId) {
try {
return withContext(context -> getCustomModelWithResponse(modelId, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<CustomFormModel>> getCustomModelWithResponse(String modelId, Context context) {
Objects.requireNonNull(modelId, "'modelId' cannot be null");
return service.getCustomModelWithResponseAsync(UUID.fromString(modelId), true, context)
.map(response -> new SimpleResponse<>(response, toCustomFormModel(response.getValue())));
}
/**
* Get account information for all custom models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountProperties}
*
* @return The account information.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AccountProperties> getAccountProperties() {
return getAccountPropertiesWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Get account information.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getAccountPropertiesWithResponse}
*
* @return A {@link Response} containing the requested account information details.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AccountProperties>> getAccountPropertiesWithResponse() {
try {
return withContext(context -> getAccountPropertiesWithResponse(context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<AccountProperties>> getAccountPropertiesWithResponse(Context context) {
return service.getCustomModelsWithResponseAsync(context)
.map(response -> new SimpleResponse<>(response,
new AccountProperties(response.getValue().getSummary().getCount(),
response.getValue().getSummary().getLimit())));
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModel
*
* @param modelId The UUID string format model identifier.
*
* @return An empty Mono.
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteModel(String modelId) {
return deleteModelWithResponse(modelId).flatMap(FluxUtil::toMono);
}
/**
* Deletes the specified custom model.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.deleteModelWithResponse
*
* @param modelId The UUID string format model identifier.
*
* @return A {@link Mono} containing containing status code and HTTP headers
* @throws NullPointerException If {@code modelId} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteModelWithResponse(String modelId) {
try {
return withContext(context -> deleteModelWithResponse(modelId, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context) {
Objects.requireNonNull(modelId, "'modelId' cannot be null");
return service.deleteCustomModelWithResponseAsync(UUID.fromString(modelId), context)
.map(response -> new SimpleResponse<>(response, null));
}
/**
* List information for all models.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.listCustomModels}
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<CustomFormModelInfo> listCustomModels() {
try {
return new PagedFlux<>(() -> withContext(context -> listFirstPageModelInfo(context)),
continuationToken -> withContext(context -> listNextPageModelInfo(continuationToken, context)));
} catch (RuntimeException ex) {
return new PagedFlux<>(() -> monoError(logger, ex));
}
}
/**
* List information for all models with taking {@link Context}.
*
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return {@link PagedFlux} of {@link CustomFormModelInfo}.
*/
PagedFlux<CustomFormModelInfo> listCustomModels(Context context) {
return new PagedFlux<>(() -> listFirstPageModelInfo(context),
continuationToken -> listNextPageModelInfo(continuationToken, context));
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
CopyAuthorization target) {
return beginCopyModel(modelId, target, null);
}
/**
* Copy a custom model stored in this resource (the source) to the user specified target Form Recognizer resource.
*
* <p>This should be called with the source Form Recognizer resource (with the model that is intended to be copied).
* The target parameter should be supplied from the target resource's output from
* {@link FormTrainingAsyncClient
* </p>
*
* <p>The service does not support cancellation of the long running operation and returns with an
* error message indicating absence of cancellation support.</p>
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.beginCopyModel
*
* @param modelId Model identifier of the model to copy to the target Form Recognizer resource
* @param target the copy authorization to the target Form Recognizer resource. The copy authorization can be
* generated from the target resource's call to {@link FormTrainingAsyncClient
* @param pollInterval Duration between each poll for the operation status. If none is specified, a default of
* 5 seconds is used.
*
* @return A {@link PollerFlux} that polls the copy model operation until it has completed, has failed,
* or has been cancelled.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<OperationResult, CustomFormModelInfo> beginCopyModel(String modelId,
CopyAuthorization target, Duration pollInterval) {
final Duration interval = pollInterval != null ? pollInterval : DEFAULT_DURATION;
return new PollerFlux<OperationResult, CustomFormModelInfo>(
interval,
getCopyActivationOperation(modelId, target),
createCopyPollOperation(modelId),
(activationResponse, context) -> Mono.error(new RuntimeException("Cancellation is not supported")),
fetchCopyModelResultOperation(modelId, target.getModelId()));
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported
* by Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorization
*
* @return The {@link CopyAuthorization} that could be used to authorize copying model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CopyAuthorization> getCopyAuthorization(String resourceId, String resourceRegion) {
return getCopyAuthorizationWithResponse(resourceId, resourceRegion).flatMap(FluxUtil::toMono);
}
/**
* Generate authorization for copying a custom model into the target Form Recognizer resource.
* This should be called by the target resource (where the model will be copied to) and the output can be passed as
* the target parameter into {@link FormTrainingAsyncClient
*
* @param resourceId Azure Resource Id of the target Form Recognizer resource where the model will be copied to.
* @param resourceRegion Location of the target Form Recognizer resource. A valid Azure region name supported by
* Cognitive Services.
*
* <p><strong>Code sample</strong></p>
* {@codesnippet com.azure.ai.formrecognizer.training.FormTrainingAsyncClient.getCopyAuthorizationWithResponse
*
* @return A {@link Response} containing the {@link CopyAuthorization} that could be used to authorize copying
* model between resources.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId,
String resourceRegion) {
try {
return withContext(context -> getCopyAuthorizationWithResponse(resourceId, resourceRegion, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<CopyAuthorization>> getCopyAuthorizationWithResponse(String resourceId, String resourceRegion,
Context context) {
Objects.requireNonNull(resourceId, "'resourceId' cannot be null");
Objects.requireNonNull(resourceRegion, "'resourceRegion' cannot be null");
return service.generateModelCopyAuthorizationWithResponseAsync(context)
.map(response -> {
CopyAuthorizationResult copyAuthorizationResult = response.getValue();
return new SimpleResponse<>(response, new CopyAuthorization(copyAuthorizationResult.getModelId(),
copyAuthorizationResult.getAccessToken(), resourceId, resourceRegion,
copyAuthorizationResult.getExpirationDateTimeTicks()));
});
}
private Mono<PagedResponse<CustomFormModelInfo>> listFirstPageModelInfo(Context context) {
return service.listCustomModelsSinglePageAsync(context)
.doOnRequest(ignoredValue -> logger.info("Listing information for all models"))
.doOnSuccess(response -> logger.info("Listed all models"))
.doOnError(error -> logger.warning("Failed to list all models information", error))
.map(res -> new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
toCustomFormModelInfo(res.getValue()),
res.getContinuationToken(),
null));
}
private Mono<PagedResponse<CustomFormModelInfo>> listNextPageModelInfo(String nextPageLink, Context context) {
if (CoreUtils.isNullOrEmpty(nextPageLink)) {
return Mono.empty();
}
return service.listCustomModelsNextSinglePageAsync(nextPageLink, context)
.doOnSubscribe(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink))
.doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink))
.doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink,
error))
.map(res -> new PagedResponseBase<>(
res.getRequest(),
res.getStatusCode(),
res.getHeaders(),
toCustomFormModelInfo(res.getValue()),
res.getContinuationToken(),
null));
}
private Function<PollingContext<OperationResult>, Mono<CustomFormModelInfo>> fetchCopyModelResultOperation(
String modelId, String copyModelId) {
return (pollingContext) -> {
try {
final UUID resultUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
Objects.requireNonNull(modelId, "'modelId' cannot be null.");
return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), resultUid)
.map(modelSimpleResponse -> {
CopyOperationResult copyOperationResult = modelSimpleResponse.getValue();
throwIfCopyOperationStatusInvalid(copyOperationResult);
return new CustomFormModelInfo(copyModelId,
copyOperationResult.getStatus() == OperationStatus.SUCCEEDED
? CustomFormModelStatus.READY
: CustomFormModelStatus.fromString(copyOperationResult.getStatus().toString()),
copyOperationResult.getCreatedDateTime(),
copyOperationResult.getLastUpdatedDateTime());
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
createCopyPollOperation(String modelId) {
return (pollingContext) -> {
try {
PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
UUID targetId = UUID.fromString(operationResultPollResponse.getValue().getResultId());
return service.getCustomModelCopyResultWithResponseAsync(UUID.fromString(modelId), targetId)
.flatMap(modelSimpleResponse ->
processCopyModelResponse(modelSimpleResponse, operationResultPollResponse));
} catch (HttpResponseException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getCopyActivationOperation(
String modelId, CopyAuthorization target) {
return (pollingContext) -> {
try {
Objects.requireNonNull(modelId, "'modelId' cannot be null.");
Objects.requireNonNull(target, "'target' cannot be null.");
CopyRequest copyRequest = new CopyRequest()
.setTargetResourceId(target.getResourceId())
.setTargetResourceRegion(target.getResourceRegion())
.setCopyAuthorization(new CopyAuthorizationResult()
.setModelId(target.getModelId())
.setAccessToken(target.getAccessToken())
.setExpirationDateTimeTicks(target.getExpiresOn()));
return service.copyCustomModelWithResponseAsync(UUID.fromString(modelId), copyRequest)
.map(response ->
new OperationResult(parseModelId(response.getDeserializedHeaders().getOperationLocation())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Mono<PollResponse<OperationResult>> processCopyModelResponse(
SimpleResponse<CopyOperationResult> copyModel,
PollResponse<OperationResult> copyModelOperationResponse) {
LongRunningOperationStatus status;
switch (copyModel.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case FAILED:
status = LongRunningOperationStatus.FAILED;
break;
default:
status = LongRunningOperationStatus.fromString(copyModel.getValue().getStatus().toString(), true);
break;
}
return Mono.just(new PollResponse<>(status, copyModelOperationResponse.getValue()));
}
private Function<PollingContext<OperationResult>, Mono<CustomFormModel>> fetchTrainingModelResultOperation() {
return (pollingContext) -> {
try {
final UUID modelUid = UUID.fromString(pollingContext.getLatestResponse().getValue().getResultId());
return service.getCustomModelWithResponseAsync(modelUid, true)
.map(modelSimpleResponse -> {
throwIfModelStatusInvalid(modelSimpleResponse.getValue());
return toCustomFormModel(modelSimpleResponse.getValue());
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<OperationResult>, Mono<PollResponse<OperationResult>>>
createTrainingPollOperation() {
return (pollingContext) -> {
try {
PollResponse<OperationResult> operationResultPollResponse = pollingContext.getLatestResponse();
UUID modelUid = UUID.fromString(operationResultPollResponse.getValue().getResultId());
return service.getCustomModelWithResponseAsync(modelUid, true)
.flatMap(modelSimpleResponse ->
processTrainingModelResponse(modelSimpleResponse, operationResultPollResponse));
} catch (HttpResponseException e) {
logger.logExceptionAsError(e);
return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null));
}
};
}
private Function<PollingContext<OperationResult>, Mono<OperationResult>> getTrainingActivationOperation(
String trainingFilesUrl, boolean includeSubFolders, String filePrefix, boolean useTrainingLabels) {
return (pollingContext) -> {
try {
Objects.requireNonNull(trainingFilesUrl, "'trainingFilesUrl' cannot be null.");
TrainSourceFilter trainSourceFilter = new TrainSourceFilter().setIncludeSubFolders(includeSubFolders)
.setPrefix(filePrefix);
TrainRequest serviceTrainRequest = new TrainRequest().setSource(trainingFilesUrl).
setSourceFilter(trainSourceFilter).setUseLabelFile(useTrainingLabels);
return service.trainCustomModelAsyncWithResponseAsync(serviceTrainRequest)
.map(response ->
new OperationResult(parseModelId(response.getDeserializedHeaders().getLocation())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private static Mono<PollResponse<OperationResult>> processTrainingModelResponse(
SimpleResponse<Model> trainingModel,
PollResponse<OperationResult> trainingModelOperationResponse) {
LongRunningOperationStatus status;
switch (trainingModel.getValue().getModelInfo().getStatus()) {
case CREATING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case READY:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case INVALID:
status = LongRunningOperationStatus.FAILED;
break;
default:
status = LongRunningOperationStatus.fromString(
trainingModel.getValue().getModelInfo().getStatus().toString(), true);
break;
}
return Mono.just(new PollResponse<>(status, trainingModelOperationResponse.getValue()));
}
/**
* Helper method that throws a {@link HttpResponseException} if {@link CopyOperationResult
* {@link OperationStatus
*
* @param copyResult The copy operation response returned from the service.
*/
private void throwIfCopyOperationStatusInvalid(CopyOperationResult copyResult) {
if (copyResult.getStatus().equals(OperationStatus.FAILED)) {
List<ErrorInformation> errorInformationList = copyResult.getCopyResult().getErrors();
if (!CoreUtils.isNullOrEmpty(errorInformationList)) {
throw logger.logExceptionAsError(new HttpResponseException("Copy operation returned with a failed "
+ "status", null, errorInformationList));
}
}
}
/**
* Helper method that throws a {@link HttpResponseException} if {@link ModelInfo
* {@link com.azure.ai.formrecognizer.implementation.models.ModelStatus
*
* @param customModel The response returned from the service.
*/
} |
ohhh and does ` @since 9` signals that? or should there be a comment with that info? | public static void main(String[] args) throws IOException {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("https:
.buildAsyncClient();
File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/"
+ "forms/Form_1.jpg");
byte[] fileContent = Files.readAllBytes(analyzeFile.toPath());
PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller =
client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(
toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}")
.setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true)
.setPollInterval(Duration.ofSeconds(5)));
PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller =
client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)),
analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF);
Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
System.out.println("--------Recognizing forms with labeled custom model--------");
labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm ->
labeledForm.getFields().forEach((label, formField) -> {
final StringBuilder boundingBoxStr = new StringBuilder();
if (formField.getValueText().getBoundingBox() != null) {
formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f,"
+ " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append);
}
System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score "
+ "of %.2f.%n",
label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr,
formField.getConfidence());
System.out.println("Value for a specific labeled field using the training-time label:");
labeledForm.getFields().entrySet()
.stream()
.filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey()))
.findAny()
.ifPresent(formFieldEntry ->
System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue()));
})));
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
System.out.println("-----------------------------------------------------------");
System.out.println("-------Recognizing forms with unlabeled custom model-------");
unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm ->
unLabeledForm.getFields().forEach((label, formField) -> {
final StringBuilder boundingBoxStr = new StringBuilder();
if (formField.getValueText().getBoundingBox() != null) {
formField.getValueText().getBoundingBox().getPoints().stream().map(point ->
String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append);
}
final StringBuilder boundingBoxLabelStr = new StringBuilder();
if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) {
formField.getLabelText().getBoundingBox().getPoints().stream().map(point ->
String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append);
}
System.out.printf("Field %s has label %s within bounding box %s with a confidence score "
+ "of %.2f.%n",
label, formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence());
System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence "
+ "score of %.2f.%n",
label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr,
formField.getConfidence());
unLabeledForm.getFields().entrySet()
.stream()
.filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText()))
.findAny()
.ifPresent(formFieldEntry ->
System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue()));
})));
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | public static void main(String[] args) throws IOException {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("https:
.buildAsyncClient();
File analyzeFile = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/java/sample-forms/"
+ "forms/Form_1.jpg");
byte[] fileContent = Files.readAllBytes(analyzeFile.toPath());
PollerFlux<OperationResult, List<RecognizedForm>> labeledCustomFormPoller =
client.beginRecognizeCustomForms(new RecognizeCustomFormsOptions(
toFluxByteBuffer(new ByteArrayInputStream(fileContent)), analyzeFile.length(), "{labeled_model_Id}")
.setFormContentType(FormContentType.APPLICATION_PDF).setIncludeTextContent(true)
.setPollInterval(Duration.ofSeconds(5)));
PollerFlux<OperationResult, List<RecognizedForm>> unlabeledCustomFormPoller =
client.beginRecognizeCustomForms(toFluxByteBuffer(new ByteArrayInputStream(fileContent)),
analyzeFile.length(), "{unlabeled_model_Id}", FormContentType.APPLICATION_PDF);
Mono<List<RecognizedForm>> labeledDataResult = labeledCustomFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
Mono<List<RecognizedForm>> unlabeledDataResult = unlabeledCustomFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
System.out.println("--------Recognizing forms with labeled custom model--------");
labeledDataResult.subscribe(formsWithLabeledModel -> formsWithLabeledModel.forEach(labeledForm ->
labeledForm.getFields().forEach((label, formField) -> {
final StringBuilder boundingBoxStr = new StringBuilder();
if (formField.getValueText().getBoundingBox() != null) {
formField.getValueText().getBoundingBox().getPoints().stream().map(point -> String.format("[%.2f,"
+ " %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append);
}
System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence score "
+ "of %.2f.%n",
label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr,
formField.getConfidence());
System.out.println("Value for a specific labeled field using the training-time label:");
labeledForm.getFields().entrySet()
.stream()
.filter(formFieldEntry -> "Merchant".equals(formFieldEntry.getKey()))
.findAny()
.ifPresent(formFieldEntry ->
System.out.printf("The Merchant name is: %s%n", formFieldEntry.getValue()));
})));
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
System.out.println("-----------------------------------------------------------");
System.out.println("-------Recognizing forms with unlabeled custom model-------");
unlabeledDataResult.subscribe(recognizedForms -> recognizedForms.forEach(unLabeledForm ->
unLabeledForm.getFields().forEach((label, formField) -> {
final StringBuilder boundingBoxStr = new StringBuilder();
if (formField.getValueText().getBoundingBox() != null) {
formField.getValueText().getBoundingBox().getPoints().stream().map(point ->
String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append);
}
final StringBuilder boundingBoxLabelStr = new StringBuilder();
if (formField.getLabelText() != null && formField.getLabelText().getBoundingBox() != null) {
formField.getLabelText().getBoundingBox().getPoints().stream().map(point ->
String.format("[%.2f, %.2f]", point.getX(), point.getY())).forEach(boundingBoxStr::append);
}
System.out.printf("Field %s has label %s within bounding box %s with a confidence score "
+ "of %.2f.%n",
label, formField.getLabelText().getText(), boundingBoxLabelStr, formField.getConfidence());
System.out.printf("Field %s has value %s based on %s within bounding box %s with a confidence "
+ "score of %.2f.%n",
label, formField.getFieldValue(), formField.getValueText().getText(), boundingBoxStr,
formField.getConfidence());
unLabeledForm.getFields().entrySet()
.stream()
.filter(formFieldEntry -> "Vendor Name:".equals(formFieldEntry.getValue().getLabelText().getText()))
.findAny()
.ifPresent(formFieldEntry ->
System.out.printf("The Vendor name is: %s%n", formFieldEntry.getValue()));
})));
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | class AdvancedDiffLabeledUnlabeledDataAsync {
/**
* Main method to invoke this demo.
*
* @param args Unused arguments to the program.
*
* @throws IOException Exception thrown when there is an error in reading all the bytes from the File.
*/
} | class AdvancedDiffLabeledUnlabeledDataAsync {
/**
* Main method to invoke this demo.
*
* @param args Unused arguments to the program.
*
* @throws IOException Exception thrown when there is an error in reading all the bytes from the File.
*/
} | |
It will also match what we do in `RestProxy`. https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/RestProxy.java#L146 | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
})
.doFirst(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
currentTotalLength[0] = 0;
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
} | }) | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
data.mark(Integer.MAX_VALUE);
return Flux.defer(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
final long[] currentTotalLength = new long[1];
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
} else if (currentTotalLength[0] > length) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
String.format("Read more data than was requested. Size of data read: %d. Size of data"
+ " requested: %d", currentTotalLength[0], length)));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
});
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} |
Not all input streams support `reset()`. Some may throw IOException as specified in [javadoc](https://docs.oracle.com/javase/8/docs/api/java/io/InputStream.html#reset--). Also, if the input stream does support resetting the position, this will reset to the last marked position that may be different from where the user wanted us to read the stream from. | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
})
.doFirst(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
currentTotalLength[0] = 0;
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
} | data.reset(); | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
data.mark(Integer.MAX_VALUE);
return Flux.defer(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
final long[] currentTotalLength = new long[1];
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
} else if (currentTotalLength[0] > length) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
String.format("Read more data than was requested. Size of data read: %d. Size of data"
+ " requested: %d", currentTotalLength[0], length)));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
});
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} |
Yes. The most common `InputStream` in the upload case, FileInputStream (FIS), does not support `reset`. Users might have to wrap FIS in `BufferedInputStream` (BIS) and give it as input to upload API. But BIS comes with extra allocation cost as worse as the size of the file. Few options I could think of are: 1. If we choose reset path, then make it clear in the java-doc that it is required to have "reset to the start" support for retry. Since `FileInputStream` is the most common and does not support reset out of the box, having a code sample/snippet showing how to create `InputStream` from `FileChannel` may help. `FileChannel` backing File does support setting read position. 2. another option is to take a `Supplier<InputStream>` which means, each subscription will invoke Supplier to get a new InputStream (hence read-position 0 for each instance), But this is a API change and needs to discuss and get approved, there are questions like the pattern for disposing of the obtained IS. | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
})
.doFirst(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
currentTotalLength[0] = 0;
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
} | data.reset(); | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
data.mark(Integer.MAX_VALUE);
return Flux.defer(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
final long[] currentTotalLength = new long[1];
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
} else if (currentTotalLength[0] > length) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
String.format("Read more data than was requested. Size of data read: %d. Size of data"
+ " requested: %d", currentTotalLength[0], length)));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
});
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
    // Null and empty inputs both decode to the empty string.
    if (CoreUtils.isNullOrEmpty(stringToDecode)) {
        return "";
    }
    // Fast path: no '+' to preserve, decode the whole value at once.
    if (!stringToDecode.contains("+")) {
        return decode(stringToDecode);
    }
    // URLDecoder turns '+' into ' ', so decode the segments between '+'
    // characters individually and copy each '+' through untouched.
    StringBuilder decoded = new StringBuilder();
    int segmentStart = 0;
    for (int plusIndex = stringToDecode.indexOf('+'); plusIndex != -1;
        plusIndex = stringToDecode.indexOf('+', segmentStart)) {
        if (plusIndex > segmentStart) {
            decoded.append(decode(stringToDecode.substring(segmentStart, plusIndex)));
        }
        decoded.append('+');
        segmentStart = plusIndex + 1;
    }
    // Decode whatever trails the final '+'.
    if (segmentStart < stringToDecode.length()) {
        decoded.append(decode(stringToDecode.substring(segmentStart)));
    }
    return decoded.toString();
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
/*
 * Helper method to reduce duplicate calls of URLDecoder.decode.
 *
 * Always decodes with UTF-8. UnsupportedEncodingException can only occur if the
 * runtime lacks UTF-8 support, which the Java platform guarantees is present, so it
 * is rethrown unchecked. The exception is routed through the class logger before
 * being thrown, for consistency with the error handling used elsewhere in this class.
 */
private static String decode(final String stringToDecode) {
    try {
        return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
    } catch (UnsupportedEncodingException ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlEncode(final String stringToEncode) {
    // Preserve the caller-visible distinction between null and empty inputs.
    if (stringToEncode == null) {
        return null;
    }
    if (stringToEncode.isEmpty()) {
        return "";
    }
    // Fast path: no spaces, encode the whole value at once.
    if (!stringToEncode.contains(" ")) {
        return encode(stringToEncode);
    }
    // URLEncoder turns ' ' into '+', so encode the segments between spaces
    // individually and emit "%20" for each space instead.
    StringBuilder encoded = new StringBuilder();
    int segmentStart = 0;
    for (int spaceIndex = stringToEncode.indexOf(' '); spaceIndex != -1;
        spaceIndex = stringToEncode.indexOf(' ', segmentStart)) {
        if (spaceIndex > segmentStart) {
            encoded.append(encode(stringToEncode.substring(segmentStart, spaceIndex)));
        }
        encoded.append("%20");
        segmentStart = spaceIndex + 1;
    }
    // Encode whatever trails the final space.
    if (segmentStart < stringToEncode.length()) {
        encoded.append(encode(stringToEncode.substring(segmentStart)));
    }
    return encoded.toString();
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
/*
 * Helper method to reduce duplicate calls of URLEncoder.encode.
 *
 * Always encodes with UTF-8. UnsupportedEncodingException can only occur if the
 * runtime lacks UTF-8 support, which the Java platform guarantees is present, so it
 * is rethrown unchecked. The exception is routed through the class logger before
 * being thrown, for consistency with the error handling used elsewhere in this class.
 */
private static String encode(final String stringToEncode) {
    try {
        return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
    } catch (UnsupportedEncodingException ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
    // The recognized ISO8601 variants are distinguished purely by string length;
    // the millisecond-precision pattern is used unless a coarser variant matches.
    final int length = dateString.length();
    String pattern = MAX_PRECISION_PATTERN;
    if (length >= 24 && length <= 28) {
        // Millisecond-or-finer precision plus a trailing 'Z': truncate to the
        // millisecond precision the default pattern can parse (drops the 'Z').
        dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
    } else if (length == 23) {
        // Two fractional digits plus 'Z': pad to three millisecond digits.
        dateString = dateString.replace("Z", "0");
    } else if (length == 22) {
        // One fractional digit plus 'Z': pad to three millisecond digits.
        dateString = dateString.replace("Z", "00");
    } else if (length == 20) {
        // Second precision with 'Z' suffix.
        pattern = Utility.ISO8601_PATTERN;
    } else if (length == 17) {
        // Minute precision with 'Z' suffix.
        pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
    } else {
        // Any other length is not a supported ISO8601 variant.
        throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
    }
    DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
    // The zone-less parsed value is always interpreted as UTC.
    return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} |
Good call. I was just working with what was already there, but I can clean that up. | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
})
.doFirst(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
currentTotalLength[0] = 0;
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
} | }) | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
data.mark(Integer.MAX_VALUE);
return Flux.defer(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
final long[] currentTotalLength = new long[1];
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
} else if (currentTotalLength[0] > length) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
String.format("Read more data than was requested. Size of data read: %d. Size of data"
+ " requested: %d", currentTotalLength[0], length)));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
});
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} |
Yes, I have added docs to all the apis which call this method indicating that the stream must be markable and giving guidance if it is not. I suggested opening a BlobOutputStream in those cases. I can also add a suggestion to consider wrapping it in a BufferedStream. I didn't add those javadocs to this method because it's in implementation, so those don't get generated anyway, but I can add that for our own internal purposes. Before calling this method, the calling method should be calling mark to ensure it always resets to the same place, although I'm thinking I should actually move that call to be within this method. I'm not terribly concerned about FIS in this case because I think if customers are uploading file data they are going to be calling uploadFromFile. It's also worth noting that requiring markability is consistent with the expectations we make on the fluxes passed in to the async equivalent--we require that those be replayable | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
})
.doFirst(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
currentTotalLength[0] = 0;
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
} | data.reset(); | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
data.mark(Integer.MAX_VALUE);
return Flux.defer(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
final long[] currentTotalLength = new long[1];
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
} else if (currentTotalLength[0] > length) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
String.format("Read more data than was requested. Size of data read: %d. Size of data"
+ " requested: %d", currentTotalLength[0], length)));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
});
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
 * Parses a String in an ISO8601 form into an OffsetDateTime with up to
 * millisecond precision, interpreting the value as UTC.
 *
 * @param dateString the {@code String} to be interpreted as a date/time
 * @return the corresponding {@code OffsetDateTime}
 * @throws IllegalArgumentException If {@code dateString} doesn't match a supported ISO8601 pattern
 */
public static OffsetDateTime parseDate(String dateString) {
// The pattern is selected purely from the string length.
String pattern = MAX_PRECISION_PATTERN;
int length = dateString.length();
if (length >= 24 && length <= 28) {
// More than millisecond precision: truncate the extra fractional digits.
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
} else if (length == 23) {
// Swap the trailing Z for a digit so the fraction has three digits.
dateString = dateString.replace("Z", "0");
} else if (length == 22) {
dateString = dateString.replace("Z", "00");
} else if (length == 20) {
pattern = Utility.ISO8601_PATTERN;
} else if (length == 17) {
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
} else {
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} |
Don't the async operations support non-replayable publishers? This [javadoc](https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java#L242) says that the flux doesn't have to be replayable. | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
})
.doFirst(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
currentTotalLength[0] = 0;
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
} | data.reset(); | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
data.mark(Integer.MAX_VALUE);
return Flux.defer(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
final long[] currentTotalLength = new long[1];
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
} else if (currentTotalLength[0] > length) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
String.format("Read more data than was requested. Size of data read: %d. Size of data"
+ " requested: %d", currentTotalLength[0], length)));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
});
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
 * Decodes a URL-encoded string while keeping every {@code +} character
 * as-is, rather than letting the decoder turn it into a space.
 *
 * @param stringToDecode String value to decode
 * @return the decoded string value
 * @throws RuntimeException If the UTF-8 charset isn't supported
 */
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
// Fast path: without '+' the whole string can be decoded in one call.
int plusIndex = stringToDecode.indexOf('+');
if (plusIndex == -1) {
return decode(stringToDecode);
}
// Decode each run between '+' characters separately and re-insert the
// literal '+' so it survives the round trip.
StringBuilder decoded = new StringBuilder();
int segmentStart = 0;
while (plusIndex != -1) {
if (plusIndex > segmentStart) {
decoded.append(decode(stringToDecode.substring(segmentStart, plusIndex)));
}
decoded.append("+");
segmentStart = plusIndex + 1;
plusIndex = stringToDecode.indexOf('+', segmentStart);
}
// Append the trailing run after the last '+', if any.
if (segmentStart < stringToDecode.length()) {
decoded.append(decode(stringToDecode.substring(segmentStart)));
}
return decoded.toString();
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} |
Those are on the BlobClient, which does support non-replayable publishers for both async and sync. The analogues I'm referring to are on BlockBlobAsyncClient. e.g. [stage block](https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobAsyncClient.java#L229) | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
final long[] currentTotalLength = new long[1];
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
})
.doFirst(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
currentTotalLength[0] = 0;
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
} | data.reset(); | public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) {
data.mark(Integer.MAX_VALUE);
return Flux.defer(() -> {
/*
If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be
reset in order to correctly return the same data again.
*/
final long[] currentTotalLength = new long[1];
try {
data.reset();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize))
.map(i -> i * blockSize)
.concatMap(pos -> Mono.fromCallable(() -> {
long count = pos + blockSize > length ? length - pos : blockSize;
byte[] cache = new byte[(int) count];
int numOfBytes = 0;
int offset = 0;
int len = (int) count;
while (numOfBytes != -1 && offset < count) {
numOfBytes = data.read(cache, offset, len);
offset += numOfBytes;
len -= numOfBytes;
if (numOfBytes != -1) {
currentTotalLength[0] += numOfBytes;
}
}
if (numOfBytes == -1 && currentTotalLength[0] < length) {
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, less than the expected %d bytes.",
currentTotalLength[0], length), currentTotalLength[0], length));
}
return ByteBuffer.wrap(cache);
}))
.doOnComplete(() -> {
try {
if (data.available() > 0) {
long totalLength = currentTotalLength[0] + data.available();
throw LOGGER.logExceptionAsError(new UnexpectedLengthException(
String.format("Request body emitted %d bytes, more than the expected %d bytes.",
totalLength, length), totalLength, length));
} else if (currentTotalLength[0] > length) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
String.format("Read more data than was requested. Size of data read: %d. Size of data"
+ " requested: %d", currentTotalLength[0], length)));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurs. Error details: "
+ e.getMessage()));
}
});
});
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private static final String UTF8_CHARSET = "UTF-8";
private static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage";
/**
* Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
*/
private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Stores a reference to the ISO8601 date/time pattern.
*/
private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
/**
* The length of a datestring that matches the MAX_PRECISION_PATTERN.
*/
private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "")
.length();
/**
* Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than
* replacing it with a space character.
*
* @param stringToDecode String value to decode
* @return the decoded string value
* @throws RuntimeException If the UTF-8 charset isn't supported
*/
public static String urlDecode(final String stringToDecode) {
if (CoreUtils.isNullOrEmpty(stringToDecode)) {
return "";
}
if (stringToDecode.contains("+")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToDecode.length(); m++) {
if (stringToDecode.charAt(m) == '+') {
if (m > startDex) {
outBuilder.append(decode(stringToDecode.substring(startDex, m)));
}
outBuilder.append("+");
startDex = m + 1;
}
}
if (startDex != stringToDecode.length()) {
outBuilder.append(decode(stringToDecode.substring(startDex)));
}
return outBuilder.toString();
} else {
return decode(stringToDecode);
}
}
/*
* Helper method to reduce duplicate calls of URLDecoder.decode
*/
private static String decode(final String stringToDecode) {
try {
return URLDecoder.decode(stringToDecode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of
* inserting the {@code +} character.
*
* @param stringToEncode String value to encode
* @return the encoded string value
* @throws RuntimeException If the UTF-8 charset ins't supported
*/
public static String urlEncode(final String stringToEncode) {
if (stringToEncode == null) {
return null;
}
if (stringToEncode.length() == 0) {
return "";
}
if (stringToEncode.contains(" ")) {
StringBuilder outBuilder = new StringBuilder();
int startDex = 0;
for (int m = 0; m < stringToEncode.length(); m++) {
if (stringToEncode.charAt(m) == ' ') {
if (m > startDex) {
outBuilder.append(encode(stringToEncode.substring(startDex, m)));
}
outBuilder.append("%20");
startDex = m + 1;
}
}
if (startDex != stringToEncode.length()) {
outBuilder.append(encode(stringToEncode.substring(startDex)));
}
return outBuilder.toString();
} else {
return encode(stringToEncode);
}
}
/*
* Helper method to reduce duplicate calls of URLEncoder.encode
*/
private static String encode(final String stringToEncode) {
try {
return URLEncoder.encode(stringToEncode, UTF8_CHARSET);
} catch (UnsupportedEncodingException ex) {
throw new RuntimeException(ex);
}
}
/**
* Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
* millisecond precision.
*
* @param dateString the {@code String} to be interpreted as a <code>Date</code>
* @return the corresponding <code>Date</code> object
* @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
*/
public static OffsetDateTime parseDate(String dateString) {
String pattern = MAX_PRECISION_PATTERN;
switch (dateString.length()) {
case 28:
case 27:
case 26:
case 25:
case 24:
dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
break;
case 23:
dateString = dateString.replace("Z", "0");
break;
case 22:
dateString = dateString.replace("Z", "00");
break;
case 20:
pattern = Utility.ISO8601_PATTERN;
break;
case 17:
pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
break;
default:
throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString));
}
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
return LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime();
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length
* and the input length.
*
* @param data The input data which needs to convert to ByteBuffer.
* @param length The expected input data length.
* @param blockSize The size of each ByteBuffer.
* @return {@link ByteBuffer} which contains the input data.
* @throws UnexpectedLengthException when input data length mismatch input length.
* @throws RuntimeException When I/O error occurs.
*/
} |
`areOriginalTokensReplaced`? | public static PhoneticTokenFilter map(com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter obj) {
if (obj == null) {
return null;
}
PhoneticTokenFilter phoneticTokenFilter = new PhoneticTokenFilter();
String name = obj.getName();
phoneticTokenFilter.setName(name);
Boolean replaceOriginalTokens = obj.isOriginalTokensReplaced();
phoneticTokenFilter.setReplaceOriginalTokens(replaceOriginalTokens);
if (obj.getEncoder() != null) {
PhoneticEncoder encoder = PhoneticEncoderConverter.map(obj.getEncoder());
phoneticTokenFilter.setEncoder(encoder);
}
return phoneticTokenFilter;
} | Boolean replaceOriginalTokens = obj.isOriginalTokensReplaced(); | public static PhoneticTokenFilter map(com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter obj) {
if (obj == null) {
return null;
}
PhoneticTokenFilter phoneticTokenFilter = new PhoneticTokenFilter();
String name = obj.getName();
phoneticTokenFilter.setName(name);
Boolean replaceOriginalTokens = obj.isReplaceOriginalTokens();
phoneticTokenFilter.setOriginalTokensReplaced(replaceOriginalTokens);
if (obj.getEncoder() != null) {
PhoneticEncoder encoder = PhoneticEncoderConverter.map(obj.getEncoder());
phoneticTokenFilter.setEncoder(encoder);
}
return phoneticTokenFilter;
} | class PhoneticTokenFilterConverter {
/**
* Maps from {@link com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter} to
* {@link PhoneticTokenFilter}.
*/
/**
* Maps from {@link PhoneticTokenFilter} to
* {@link com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter}.
*/
// Maps a public PhoneticTokenFilter model to its service-side implementation
// model. Returns null when given null so optional values pass straight through.
public static com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter map(PhoneticTokenFilter obj) {
if (obj == null) {
return null;
}
com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter phoneticTokenFilter =
new com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter();
String name = obj.getName();
phoneticTokenFilter.setName(name);
// NOTE(review): the accessor pair is asymmetric here (isReplaceOriginalTokens
// -> setOriginalTokensReplaced); confirm these are the intended getter/setter
// names on the two models -- the reviewed revision renames them.
Boolean replaceOriginalTokens = obj.isReplaceOriginalTokens();
phoneticTokenFilter.setOriginalTokensReplaced(replaceOriginalTokens);
// Encoder is optional; only convert when present to avoid mapping null.
if (obj.getEncoder() != null) {
com.azure.search.documents.indexes.implementation.models.PhoneticEncoder encoder =
PhoneticEncoderConverter.map(obj.getEncoder());
phoneticTokenFilter.setEncoder(encoder);
}
return phoneticTokenFilter;
}
private PhoneticTokenFilterConverter() {
}
} | class PhoneticTokenFilterConverter {
/**
* Maps from {@link com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter} to
* {@link PhoneticTokenFilter}.
*/
/**
* Maps from {@link PhoneticTokenFilter} to
* {@link com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter}.
*/
public static com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter map(PhoneticTokenFilter obj) {
if (obj == null) {
return null;
}
com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter phoneticTokenFilter =
new com.azure.search.documents.indexes.implementation.models.PhoneticTokenFilter();
String name = obj.getName();
phoneticTokenFilter.setName(name);
Boolean replaceOriginalTokens = obj.areOriginalTokensReplaced();
phoneticTokenFilter.setReplaceOriginalTokens(replaceOriginalTokens);
if (obj.getEncoder() != null) {
com.azure.search.documents.indexes.implementation.models.PhoneticEncoder encoder =
PhoneticEncoderConverter.map(obj.getEncoder());
phoneticTokenFilter.setEncoder(encoder);
}
return phoneticTokenFilter;
}
private PhoneticTokenFilterConverter() {
}
} |
This should be done once in the constructor instead of doing it every time `getMessage()` is called. | public String getMessage() {
final String baseMessage = super.getMessage();
StringBuilder errorInformationMessage = new StringBuilder().append(baseMessage);
if (errorInformationList.size() > 0) {
for (ErrorInformation errorInformation : errorInformationList) {
errorInformationMessage.append(", " + "errorCode" + ": [" + errorInformation.getCode()
+ "], " + "message" + ": " + errorInformation.getMessage());
}
}
return errorInformationMessage.toString();
} | return errorInformationMessage.toString(); | public String getMessage() {
return this.errorInformationMessage;
} | class FormRecognizerException extends AzureException {
private final List<ErrorInformation> errorInformationList;
/**
* Initializes a new instance of {@link FormRecognizerException} class
*
* @param message Text containing the details of the exception.
* @param errorInformationList The List of error information that caused the exception
*/
public FormRecognizerException(final String message, final List<ErrorInformation> errorInformationList) {
super(message);
this.errorInformationList = errorInformationList;
}
@Override
/**
* Get the error information list for this exception.
*
* @return the error information list for this exception.
*/
public List<ErrorInformation> getErrorInformation() {
return this.errorInformationList;
}
} | class FormRecognizerException extends AzureException {
private final List<ErrorInformation> errorInformationList;
private final String errorInformationMessage;
/**
* Initializes a new instance of {@link FormRecognizerException} class
*
* @param message Text containing the details of the exception.
* @param errorInformationList The List of error information that caused the exception
*/
public FormRecognizerException(final String message, final List<ErrorInformation> errorInformationList) {
super(message);
StringBuilder errorInformationStringBuilder = new StringBuilder().append(message);
if (errorInformationList.size() > 0) {
for (ErrorInformation errorInformation : errorInformationList) {
errorInformationStringBuilder.append(", " + "errorCode" + ": [" + errorInformation.getCode()
+ "], " + "message" + ": " + errorInformation.getMessage());
}
}
this.errorInformationMessage = errorInformationStringBuilder.toString();
this.errorInformationList = errorInformationList;
}
@Override
/**
* Get the error information list for this exception.
*
* @return the error information list for this exception.
*/
public List<ErrorInformation> getErrorInformation() {
return this.errorInformationList;
}
} |
I thought we need to throw error in this case, as CFP will not work in consistency below `SESSION`, no ? | public ChangeFeedProcessorBuilderImpl leaseContainer(CosmosAsyncContainer leaseClient) {
if (leaseClient == null) {
throw new IllegalArgumentException("leaseClient");
}
if (!getContextClient(leaseClient).isContentResponseOnWriteEnabled()) {
throw new IllegalArgumentException("leaseClient: content response on write setting must be enabled");
}
ConsistencyLevel consistencyLevel = getContextClient(leaseClient).getConsistencyLevel();
if (consistencyLevel == ConsistencyLevel.CONSISTENT_PREFIX || consistencyLevel == ConsistencyLevel.EVENTUAL) {
logger.warn("leaseClient consistency level setting are less then expected which is SESSION");
}
this.leaseContextClient = new ChangeFeedContextClientImpl(leaseClient);
return this;
} | logger.warn("leaseClient consistency level setting are less then expected which is SESSION"); | public ChangeFeedProcessorBuilderImpl leaseContainer(CosmosAsyncContainer leaseClient) {
if (leaseClient == null) {
throw new IllegalArgumentException("leaseClient");
}
if (!getContextClient(leaseClient).isContentResponseOnWriteEnabled()) {
throw new IllegalArgumentException("leaseClient: content response on write setting must be enabled");
}
ConsistencyLevel consistencyLevel = getContextClient(leaseClient).getConsistencyLevel();
if (consistencyLevel == ConsistencyLevel.CONSISTENT_PREFIX || consistencyLevel == ConsistencyLevel.EVENTUAL) {
logger.warn("leaseClient consistency level setting are less then expected which is SESSION");
}
this.leaseContextClient = new ChangeFeedContextClientImpl(leaseClient);
return this;
} | class ChangeFeedProcessorBuilderImpl implements ChangeFeedProcessor.BuilderDefinition, ChangeFeedProcessor, AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(ChangeFeedProcessorBuilderImpl.class);
private static final long DefaultUnhealthinessDuration = Duration.ofMinutes(15).toMillis();
private final Duration sleepTime = Duration.ofSeconds(15);
private final Duration lockTime = Duration.ofSeconds(30);
private static final int DefaultQueryPartitionsMaxBatchSize = 100;
private int queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
private int degreeOfParallelism = 25;
private String hostName;
private ChangeFeedContextClient feedContextClient;
private ChangeFeedProcessorOptions changeFeedProcessorOptions;
private ChangeFeedObserverFactory observerFactory;
private volatile String databaseResourceId;
private volatile String collectionResourceId;
private ChangeFeedContextClient leaseContextClient;
private PartitionLoadBalancingStrategy loadBalancingStrategy;
private PartitionProcessorFactory partitionProcessorFactory;
private LeaseStoreManager leaseStoreManager;
private HealthMonitor healthMonitor;
private volatile PartitionManager partitionManager;
private Scheduler scheduler;
/**
* Start listening for changes asynchronously.
*
* @return a representation of the deferred computation of this call.
*/
@Override
public Mono<Void> start() {
if (this.partitionManager == null) {
return this.initializeCollectionPropertiesForBuild()
.flatMap( value -> this.getLeaseStoreManager()
.flatMap(leaseStoreManager -> this.buildPartitionManager(leaseStoreManager)))
.flatMap(partitionManager1 -> {
this.partitionManager = partitionManager1;
return this.partitionManager.start();
});
} else {
return partitionManager.start();
}
}
/**
* Stops listening for changes asynchronously.
*
* @return a representation of the deferred computation of this call.
*/
@Override
public Mono<Void> stop() {
if (this.partitionManager == null || !this.partitionManager.isRunning()) {
throw new IllegalStateException("The ChangeFeedProcessor instance has not fully started");
}
return this.partitionManager.stop();
}
/**
* Returns the state of the change feed processor.
*
* @return true if the change feed processor is currently active and running.
*/
@Override
public boolean isStarted() {
return this.partitionManager != null && this.partitionManager.isRunning();
}
/**
* Returns the current owner (host) and an approximation of the difference between the last processed item (defined
* by the state of the feed container) and the latest change in the container for each partition (lease
* document).
* <p>
* An empty map will be returned if the processor was not started or no lease documents matching the current
* {@link ChangeFeedProcessor} instance's lease prefix could be found.
*
* @return a map representing the current owner and lease token, the current LSN and latest LSN, and the estimated
* lag, asynchronously.
*/
@Override
public Mono<Map<String, Integer>> getEstimatedLag() {
Map<String, Integer> earlyResult = new ConcurrentHashMap<>();
if (this.leaseStoreManager == null || this.feedContextClient == null) {
return Mono.just(earlyResult);
}
return this.leaseStoreManager.getAllLeases()
.flatMap(lease -> {
ChangeFeedOptions options = new ChangeFeedOptions()
.setMaxItemCount(1)
.setPartitionKeyRangeId(lease.getLeaseToken())
.setStartFromBeginning(true)
.setRequestContinuation(lease.getContinuationToken());
return this.feedContextClient.createDocumentChangeFeedQuery(this.feedContextClient.getContainerClient(), options)
.take(1)
.map(feedResponse -> {
final String pkRangeIdSeparator = ":";
final String segmentSeparator = "
final String lsnPropertyName = "_lsn";
String ownerValue = lease.getOwner();
String sessionTokenLsn = feedResponse.getSessionToken();
String parsedSessionToken = sessionTokenLsn.substring(sessionTokenLsn.indexOf(pkRangeIdSeparator));
String[] segments = parsedSessionToken.split(segmentSeparator);
String latestLsn = segments[0];
if (segments.length >= 2) {
latestLsn = segments[1];
}
if (ownerValue == null) {
ownerValue = "";
}
if (feedResponse.getResults() == null || feedResponse.getResults().size() == 0) {
return Pair.of(ownerValue + "_" + lease.getLeaseToken(), 0);
}
Integer currentLsn = 0;
Integer estimatedLag = 0;
try {
currentLsn = Integer.valueOf(feedResponse.getResults().get(0).get(lsnPropertyName).asText("0"));
estimatedLag = Integer.valueOf(latestLsn);
estimatedLag = estimatedLag - currentLsn + 1;
} catch (NumberFormatException ex) {
logger.warn("Unexpected Cosmos LSN found", ex);
estimatedLag = -1;
}
return Pair.of(ownerValue + "_" + lease.getLeaseToken() + "_" + currentLsn + "_" + latestLsn, estimatedLag);
});
})
.collectList()
.map(valueList -> {
Map<String, Integer> result = new ConcurrentHashMap<>();
for (Pair<String, Integer> pair : valueList) {
result.put(pair.getKey(), pair.getValue());
}
return result;
});
}
/**
* Sets the host name.
*
* @param hostName the name to be used for the host. When using multiple hosts, each host must have a unique name.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl hostName(String hostName) {
this.hostName = hostName;
return this;
}
/**
* Sets and existing {@link CosmosAsyncContainer} to be used to read from the monitored collection.
*
* @param feedDocumentClient the instance of {@link CosmosAsyncContainer} to be used.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl feedContainer(CosmosAsyncContainer feedDocumentClient) {
if (feedDocumentClient == null) {
throw new IllegalArgumentException("feedContextClient");
}
this.feedContextClient = new ChangeFeedContextClientImpl(feedDocumentClient);
return this;
}
/**
* Sets the {@link ChangeFeedProcessorOptions} to be used.
*
* @param changeFeedProcessorOptions the change feed processor options to use.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl options(ChangeFeedProcessorOptions changeFeedProcessorOptions) {
if (changeFeedProcessorOptions == null) {
throw new IllegalArgumentException("changeFeedProcessorOptions");
}
this.changeFeedProcessorOptions = changeFeedProcessorOptions;
return this;
}
/**
* Sets the {@link ChangeFeedObserverFactory} to be used to generate {@link ChangeFeedObserver}
*
* @param observerFactory The instance of {@link ChangeFeedObserverFactory} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl observerFactory(ChangeFeedObserverFactory observerFactory) {
if (observerFactory == null) {
throw new IllegalArgumentException("observerFactory");
}
this.observerFactory = observerFactory;
return this;
}
/**
* Sets an existing {@link ChangeFeedObserver} type to be used by a {@link ChangeFeedObserverFactory} to process changes.
* @param type the type of {@link ChangeFeedObserver} to be used.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl observer(Class<? extends ChangeFeedObserver> type) {
if (type == null) {
throw new IllegalArgumentException("type");
}
this.observerFactory = new ChangeFeedObserverFactoryImpl(type);
return this;
}
@Override
public ChangeFeedProcessorBuilderImpl handleChanges(Consumer<List<JsonNode>> consumer) {
return this.observerFactory(new DefaultObserverFactory(consumer));
}
/**
* Sets the database resource ID of the monitored collection.
*
* @param databaseResourceId the database resource ID of the monitored collection.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withDatabaseResourceId(String databaseResourceId) {
this.databaseResourceId = databaseResourceId;
return this;
}
/**
* Sets the collection resource ID of the monitored collection.
* @param collectionResourceId the collection resource ID of the monitored collection.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withCollectionResourceId(String collectionResourceId) {
this.collectionResourceId = collectionResourceId;
return this;
}
/**
* Sets an existing {@link CosmosAsyncContainer} to be used to read from the leases collection.
*
* @param leaseClient the instance of {@link CosmosAsyncContainer} to use.
* @return current Builder.
*/
@Override
/**
* Sets the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
*
* @param loadBalancingStrategy the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionLoadBalancingStrategy(PartitionLoadBalancingStrategy loadBalancingStrategy) {
if (loadBalancingStrategy == null) {
throw new IllegalArgumentException("loadBalancingStrategy");
}
this.loadBalancingStrategy = loadBalancingStrategy;
return this;
}
/**
* Sets the {@link PartitionProcessorFactory} to be used to create {@link PartitionProcessor} for partition processing.
*
* @param partitionProcessorFactory the instance of {@link PartitionProcessorFactory} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) {
if (partitionProcessorFactory == null) {
throw new IllegalArgumentException("partitionProcessorFactory");
}
this.partitionProcessorFactory = partitionProcessorFactory;
return this;
}
/**
* Sets the {@link LeaseStoreManager} to be used to manage leases.
*
* @param leaseStoreManager the instance of {@link LeaseStoreManager} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withLeaseStoreManager(LeaseStoreManager leaseStoreManager) {
if (leaseStoreManager == null) {
throw new IllegalArgumentException("leaseStoreManager");
}
this.leaseStoreManager = leaseStoreManager;
return this;
}
/**
* Sets the {@link HealthMonitor} to be used to monitor unhealthiness situation.
*
* @param healthMonitor The instance of {@link HealthMonitor} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withHealthMonitor(HealthMonitor healthMonitor) {
if (healthMonitor == null) {
throw new IllegalArgumentException("healthMonitor");
}
this.healthMonitor = healthMonitor;
return this;
}
/**
* Builds a new instance of the {@link ChangeFeedProcessor} with the specified configuration asynchronously.
*
* @return an instance of {@link ChangeFeedProcessor}.
*/
@Override
public ChangeFeedProcessor build() {
if (this.hostName == null) {
throw new IllegalArgumentException("Host name was not specified");
}
if (this.observerFactory == null) {
throw new IllegalArgumentException("Observer was not specified");
}
if (this.scheduler == null) {
this.scheduler = Schedulers.elastic();
}
return this;
}
public ChangeFeedProcessorBuilderImpl() {
this.queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
this.degreeOfParallelism = 25;
}
public ChangeFeedProcessorBuilderImpl(PartitionManager partitionManager) {
this.partitionManager = partitionManager;
}
private Mono<ChangeFeedProcessor> initializeCollectionPropertiesForBuild() {
if (this.changeFeedProcessorOptions == null) {
this.changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
}
return this.feedContextClient
.readDatabase(this.feedContextClient.getDatabaseClient(), null)
.map( databaseResourceResponse -> {
this.databaseResourceId = databaseResourceResponse.getProperties().getId();
return this.databaseResourceId;
})
.flatMap( id -> this.feedContextClient
.readContainer(this.feedContextClient.getContainerClient(), null)
.map(documentCollectionResourceResponse -> {
this.collectionResourceId = documentCollectionResourceResponse.getProperties().getId();
return this;
}));
}
private Mono<LeaseStoreManager> getLeaseStoreManager() {
if (this.leaseStoreManager == null) {
return this.leaseContextClient.readContainerSettings(this.leaseContextClient.getContainerClient(), null)
.flatMap( collectionSettings -> {
boolean isPartitioned =
collectionSettings.getPartitionKeyDefinition() != null &&
collectionSettings.getPartitionKeyDefinition().getPaths() != null &&
collectionSettings.getPartitionKeyDefinition().getPaths().size() > 0;
if (!isPartitioned || (collectionSettings.getPartitionKeyDefinition().getPaths().size() != 1 || !collectionSettings.getPartitionKeyDefinition().getPaths().get(0).equals("/id"))) {
return Mono.error(new IllegalArgumentException("The lease collection must have partition key equal to id."));
}
RequestOptionsFactory requestOptionsFactory = new PartitionedByIdCollectionRequestOptionsFactory();
String leasePrefix = this.getLeasePrefix();
return LeaseStoreManager.builder()
.leasePrefix(leasePrefix)
.leaseCollectionLink(this.leaseContextClient.getContainerClient())
.leaseContextClient(this.leaseContextClient)
.requestOptionsFactory(requestOptionsFactory)
.hostName(this.hostName)
.build()
.map(manager -> {
this.leaseStoreManager = manager;
return this.leaseStoreManager;
});
});
}
return Mono.just(this.leaseStoreManager);
}
private String getLeasePrefix() {
String optionsPrefix = this.changeFeedProcessorOptions.getLeasePrefix();
if (optionsPrefix == null) {
optionsPrefix = "";
}
URI uri = this.feedContextClient.getServiceEndpoint();
return String.format(
"%s%s_%s_%s",
optionsPrefix,
uri.getHost(),
this.databaseResourceId,
this.collectionResourceId);
}
private Mono<PartitionManager> buildPartitionManager(LeaseStoreManager leaseStoreManager) {
CheckpointerObserverFactory factory = new CheckpointerObserverFactory(this.observerFactory, new CheckpointFrequency());
PartitionSynchronizerImpl synchronizer = new PartitionSynchronizerImpl(
this.feedContextClient,
this.feedContextClient.getContainerClient(),
leaseStoreManager,
leaseStoreManager,
this.degreeOfParallelism,
this.queryPartitionsMaxBatchSize
);
Bootstrapper bootstrapper = new BootstrapperImpl(synchronizer, leaseStoreManager, this.lockTime, this.sleepTime);
PartitionSupervisorFactory partitionSupervisorFactory = new PartitionSupervisorFactoryImpl(
factory,
leaseStoreManager,
this.partitionProcessorFactory != null ? this.partitionProcessorFactory : new PartitionProcessorFactoryImpl(
this.feedContextClient,
this.changeFeedProcessorOptions,
leaseStoreManager,
this.feedContextClient.getContainerClient()),
this.changeFeedProcessorOptions,
this.scheduler
);
if (this.loadBalancingStrategy == null) {
this.loadBalancingStrategy = new EqualPartitionsBalancingStrategy(
this.hostName,
this.changeFeedProcessorOptions.getMinScaleCount(),
this.changeFeedProcessorOptions.getMaxScaleCount(),
this.changeFeedProcessorOptions.getLeaseExpirationInterval());
}
PartitionController partitionController = new PartitionControllerImpl(leaseStoreManager, leaseStoreManager, partitionSupervisorFactory, synchronizer, scheduler);
if (this.healthMonitor == null) {
this.healthMonitor = new TraceHealthMonitor();
}
PartitionController partitionController2 = new HealthMonitoringPartitionControllerDecorator(partitionController, this.healthMonitor);
PartitionLoadBalancer partitionLoadBalancer = new PartitionLoadBalancerImpl(
partitionController2,
leaseStoreManager,
this.loadBalancingStrategy,
this.changeFeedProcessorOptions.getLeaseAcquireInterval(),
this.scheduler
);
PartitionManager partitionManager = new PartitionManagerImpl(bootstrapper, partitionController, partitionLoadBalancer);
return Mono.just(partitionManager);
}
@Override
public void close() {
this.stop().subscribeOn(Schedulers.elastic()).subscribe();
}
} | class ChangeFeedProcessorBuilderImpl implements ChangeFeedProcessor.BuilderDefinition, ChangeFeedProcessor, AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(ChangeFeedProcessorBuilderImpl.class);
private static final long DefaultUnhealthinessDuration = Duration.ofMinutes(15).toMillis();
private final Duration sleepTime = Duration.ofSeconds(15);
private final Duration lockTime = Duration.ofSeconds(30);
private static final int DefaultQueryPartitionsMaxBatchSize = 100;
private int queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
private int degreeOfParallelism = 25;
private String hostName;
private ChangeFeedContextClient feedContextClient;
private ChangeFeedProcessorOptions changeFeedProcessorOptions;
private ChangeFeedObserverFactory observerFactory;
private volatile String databaseResourceId;
private volatile String collectionResourceId;
private ChangeFeedContextClient leaseContextClient;
private PartitionLoadBalancingStrategy loadBalancingStrategy;
private PartitionProcessorFactory partitionProcessorFactory;
private LeaseStoreManager leaseStoreManager;
private HealthMonitor healthMonitor;
private volatile PartitionManager partitionManager;
private Scheduler scheduler;
/**
* Start listening for changes asynchronously.
*
* @return a representation of the deferred computation of this call.
*/
@Override
public Mono<Void> start() {
if (this.partitionManager == null) {
return this.initializeCollectionPropertiesForBuild()
.flatMap( value -> this.getLeaseStoreManager()
.flatMap(leaseStoreManager -> this.buildPartitionManager(leaseStoreManager)))
.flatMap(partitionManager1 -> {
this.partitionManager = partitionManager1;
return this.partitionManager.start();
});
} else {
return partitionManager.start();
}
}
/**
* Stops listening for changes asynchronously.
*
* @return a representation of the deferred computation of this call.
*/
@Override
public Mono<Void> stop() {
if (this.partitionManager == null || !this.partitionManager.isRunning()) {
throw new IllegalStateException("The ChangeFeedProcessor instance has not fully started");
}
return this.partitionManager.stop();
}
/**
* Returns the state of the change feed processor.
*
* @return true if the change feed processor is currently active and running.
*/
@Override
public boolean isStarted() {
return this.partitionManager != null && this.partitionManager.isRunning();
}
/**
* Returns the current owner (host) and an approximation of the difference between the last processed item (defined
* by the state of the feed container) and the latest change in the container for each partition (lease
* document).
* <p>
* An empty map will be returned if the processor was not started or no lease documents matching the current
* {@link ChangeFeedProcessor} instance's lease prefix could be found.
*
* @return a map representing the current owner and lease token, the current LSN and latest LSN, and the estimated
* lag, asynchronously.
*/
@Override
public Mono<Map<String, Integer>> getEstimatedLag() {
Map<String, Integer> earlyResult = new ConcurrentHashMap<>();
if (this.leaseStoreManager == null || this.feedContextClient == null) {
return Mono.just(earlyResult);
}
return this.leaseStoreManager.getAllLeases()
.flatMap(lease -> {
ChangeFeedOptions options = new ChangeFeedOptions()
.setMaxItemCount(1)
.setPartitionKeyRangeId(lease.getLeaseToken())
.setStartFromBeginning(true)
.setRequestContinuation(lease.getContinuationToken());
return this.feedContextClient.createDocumentChangeFeedQuery(this.feedContextClient.getContainerClient(), options)
.take(1)
.map(feedResponse -> {
final String pkRangeIdSeparator = ":";
final String segmentSeparator = "
final String lsnPropertyName = "_lsn";
String ownerValue = lease.getOwner();
String sessionTokenLsn = feedResponse.getSessionToken();
String parsedSessionToken = sessionTokenLsn.substring(sessionTokenLsn.indexOf(pkRangeIdSeparator));
String[] segments = parsedSessionToken.split(segmentSeparator);
String latestLsn = segments[0];
if (segments.length >= 2) {
latestLsn = segments[1];
}
if (ownerValue == null) {
ownerValue = "";
}
if (feedResponse.getResults() == null || feedResponse.getResults().size() == 0) {
return Pair.of(ownerValue + "_" + lease.getLeaseToken(), 0);
}
Integer currentLsn = 0;
Integer estimatedLag = 0;
try {
currentLsn = Integer.valueOf(feedResponse.getResults().get(0).get(lsnPropertyName).asText("0"));
estimatedLag = Integer.valueOf(latestLsn);
estimatedLag = estimatedLag - currentLsn + 1;
} catch (NumberFormatException ex) {
logger.warn("Unexpected Cosmos LSN found", ex);
estimatedLag = -1;
}
return Pair.of(ownerValue + "_" + lease.getLeaseToken() + "_" + currentLsn + "_" + latestLsn, estimatedLag);
});
})
.collectList()
.map(valueList -> {
Map<String, Integer> result = new ConcurrentHashMap<>();
for (Pair<String, Integer> pair : valueList) {
result.put(pair.getKey(), pair.getValue());
}
return result;
});
}
/**
* Sets the host name.
*
* @param hostName the name to be used for the host. When using multiple hosts, each host must have a unique name.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl hostName(String hostName) {
this.hostName = hostName;
return this;
}
/**
* Sets and existing {@link CosmosAsyncContainer} to be used to read from the monitored collection.
*
* @param feedDocumentClient the instance of {@link CosmosAsyncContainer} to be used.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl feedContainer(CosmosAsyncContainer feedDocumentClient) {
if (feedDocumentClient == null) {
throw new IllegalArgumentException("feedContextClient");
}
this.feedContextClient = new ChangeFeedContextClientImpl(feedDocumentClient);
return this;
}
/**
* Sets the {@link ChangeFeedProcessorOptions} to be used.
*
* @param changeFeedProcessorOptions the change feed processor options to use.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl options(ChangeFeedProcessorOptions changeFeedProcessorOptions) {
if (changeFeedProcessorOptions == null) {
throw new IllegalArgumentException("changeFeedProcessorOptions");
}
this.changeFeedProcessorOptions = changeFeedProcessorOptions;
return this;
}
/**
* Sets the {@link ChangeFeedObserverFactory} to be used to generate {@link ChangeFeedObserver}
*
* @param observerFactory The instance of {@link ChangeFeedObserverFactory} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl observerFactory(ChangeFeedObserverFactory observerFactory) {
if (observerFactory == null) {
throw new IllegalArgumentException("observerFactory");
}
this.observerFactory = observerFactory;
return this;
}
/**
* Sets an existing {@link ChangeFeedObserver} type to be used by a {@link ChangeFeedObserverFactory} to process changes.
* @param type the type of {@link ChangeFeedObserver} to be used.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl observer(Class<? extends ChangeFeedObserver> type) {
if (type == null) {
throw new IllegalArgumentException("type");
}
this.observerFactory = new ChangeFeedObserverFactoryImpl(type);
return this;
}
@Override
public ChangeFeedProcessorBuilderImpl handleChanges(Consumer<List<JsonNode>> consumer) {
return this.observerFactory(new DefaultObserverFactory(consumer));
}
/**
* Sets the database resource ID of the monitored collection.
*
* @param databaseResourceId the database resource ID of the monitored collection.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withDatabaseResourceId(String databaseResourceId) {
this.databaseResourceId = databaseResourceId;
return this;
}
/**
* Sets the collection resource ID of the monitored collection.
* @param collectionResourceId the collection resource ID of the monitored collection.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withCollectionResourceId(String collectionResourceId) {
this.collectionResourceId = collectionResourceId;
return this;
}
/**
* Sets an existing {@link CosmosAsyncContainer} to be used to read from the leases collection.
*
* @param leaseClient the instance of {@link CosmosAsyncContainer} to use.
* @return current Builder.
*/
@Override
/**
* Sets the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
*
* @param loadBalancingStrategy the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionLoadBalancingStrategy(PartitionLoadBalancingStrategy loadBalancingStrategy) {
if (loadBalancingStrategy == null) {
throw new IllegalArgumentException("loadBalancingStrategy");
}
this.loadBalancingStrategy = loadBalancingStrategy;
return this;
}
/**
* Sets the {@link PartitionProcessorFactory} to be used to create {@link PartitionProcessor} for partition processing.
*
* @param partitionProcessorFactory the instance of {@link PartitionProcessorFactory} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) {
if (partitionProcessorFactory == null) {
throw new IllegalArgumentException("partitionProcessorFactory");
}
this.partitionProcessorFactory = partitionProcessorFactory;
return this;
}
/**
* Sets the {@link LeaseStoreManager} to be used to manage leases.
*
* @param leaseStoreManager the instance of {@link LeaseStoreManager} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withLeaseStoreManager(LeaseStoreManager leaseStoreManager) {
if (leaseStoreManager == null) {
throw new IllegalArgumentException("leaseStoreManager");
}
this.leaseStoreManager = leaseStoreManager;
return this;
}
/**
* Sets the {@link HealthMonitor} to be used to monitor unhealthiness situation.
*
* @param healthMonitor The instance of {@link HealthMonitor} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withHealthMonitor(HealthMonitor healthMonitor) {
if (healthMonitor == null) {
throw new IllegalArgumentException("healthMonitor");
}
this.healthMonitor = healthMonitor;
return this;
}
/**
* Builds a new instance of the {@link ChangeFeedProcessor} with the specified configuration asynchronously.
*
* @return an instance of {@link ChangeFeedProcessor}.
*/
@Override
public ChangeFeedProcessor build() {
if (this.hostName == null) {
throw new IllegalArgumentException("Host name was not specified");
}
if (this.observerFactory == null) {
throw new IllegalArgumentException("Observer was not specified");
}
if (this.scheduler == null) {
this.scheduler = Schedulers.elastic();
}
return this;
}
public ChangeFeedProcessorBuilderImpl() {
this.queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
this.degreeOfParallelism = 25;
}
public ChangeFeedProcessorBuilderImpl(PartitionManager partitionManager) {
this.partitionManager = partitionManager;
}
private Mono<ChangeFeedProcessor> initializeCollectionPropertiesForBuild() {
if (this.changeFeedProcessorOptions == null) {
this.changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
}
return this.feedContextClient
.readDatabase(this.feedContextClient.getDatabaseClient(), null)
.map( databaseResourceResponse -> {
this.databaseResourceId = databaseResourceResponse.getProperties().getId();
return this.databaseResourceId;
})
.flatMap( id -> this.feedContextClient
.readContainer(this.feedContextClient.getContainerClient(), null)
.map(documentCollectionResourceResponse -> {
this.collectionResourceId = documentCollectionResourceResponse.getProperties().getId();
return this;
}));
}
// Lazily builds a LeaseStoreManager over the lease container the first time it is
// needed; subsequent calls reuse the cached instance.
private Mono<LeaseStoreManager> getLeaseStoreManager() {
    if (this.leaseStoreManager == null) {
        return this.leaseContextClient.readContainerSettings(this.leaseContextClient.getContainerClient(), null)
            .flatMap( collectionSettings -> {
                // The lease container must be partitioned with "/id" as its only partition key path.
                boolean isPartitioned =
                    collectionSettings.getPartitionKeyDefinition() != null &&
                    collectionSettings.getPartitionKeyDefinition().getPaths() != null &&
                    collectionSettings.getPartitionKeyDefinition().getPaths().size() > 0;
                if (!isPartitioned || (collectionSettings.getPartitionKeyDefinition().getPaths().size() != 1 || !collectionSettings.getPartitionKeyDefinition().getPaths().get(0).equals("/id"))) {
                    return Mono.error(new IllegalArgumentException("The lease collection must have partition key equal to id."));
                }
                RequestOptionsFactory requestOptionsFactory = new PartitionedByIdCollectionRequestOptionsFactory();
                // Lease documents are namespaced per endpoint/database/container (plus optional user prefix).
                String leasePrefix = this.getLeasePrefix();
                return LeaseStoreManager.builder()
                    .leasePrefix(leasePrefix)
                    .leaseCollectionLink(this.leaseContextClient.getContainerClient())
                    .leaseContextClient(this.leaseContextClient)
                    .requestOptionsFactory(requestOptionsFactory)
                    .hostName(this.hostName)
                    .build()
                    .map(manager -> {
                        // Cache for reuse (e.g. by getEstimatedLag()).
                        this.leaseStoreManager = manager;
                        return this.leaseStoreManager;
                    });
            });
    }
    return Mono.just(this.leaseStoreManager);
}
/**
 * Computes the lease-document name prefix: the user-configured prefix (if any)
 * followed by the service host, database id, and container id.
 */
private String getLeasePrefix() {
    String configuredPrefix = this.changeFeedProcessorOptions.getLeasePrefix();
    String prefix = (configuredPrefix != null) ? configuredPrefix : "";
    URI serviceEndpoint = this.feedContextClient.getServiceEndpoint();
    return prefix + serviceEndpoint.getHost()
        + "_" + this.databaseResourceId
        + "_" + this.collectionResourceId;
}
// Wires the full partition-processing pipeline: synchronizer, bootstrapper,
// supervisor factory, controller (health-monitored), load balancer, and finally
// the PartitionManager that drives them all.
private Mono<PartitionManager> buildPartitionManager(LeaseStoreManager leaseStoreManager) {
    CheckpointerObserverFactory factory = new CheckpointerObserverFactory(this.observerFactory, new CheckpointFrequency());
    PartitionSynchronizerImpl synchronizer = new PartitionSynchronizerImpl(
        this.feedContextClient,
        this.feedContextClient.getContainerClient(),
        leaseStoreManager,
        leaseStoreManager,
        this.degreeOfParallelism,
        this.queryPartitionsMaxBatchSize
    );
    Bootstrapper bootstrapper = new BootstrapperImpl(synchronizer, leaseStoreManager, this.lockTime, this.sleepTime);
    PartitionSupervisorFactory partitionSupervisorFactory = new PartitionSupervisorFactoryImpl(
        factory,
        leaseStoreManager,
        // Caller-supplied processor factory wins; otherwise use the default implementation.
        this.partitionProcessorFactory != null ? this.partitionProcessorFactory : new PartitionProcessorFactoryImpl(
            this.feedContextClient,
            this.changeFeedProcessorOptions,
            leaseStoreManager,
            this.feedContextClient.getContainerClient()),
        this.changeFeedProcessorOptions,
        this.scheduler
    );
    if (this.loadBalancingStrategy == null) {
        // Default strategy: spread partitions evenly across hosts.
        this.loadBalancingStrategy = new EqualPartitionsBalancingStrategy(
            this.hostName,
            this.changeFeedProcessorOptions.getMinScaleCount(),
            this.changeFeedProcessorOptions.getMaxScaleCount(),
            this.changeFeedProcessorOptions.getLeaseExpirationInterval());
    }
    // NOTE(review): `scheduler` is used here while `this.scheduler` is used below — same field, but inconsistent style.
    PartitionController partitionController = new PartitionControllerImpl(leaseStoreManager, leaseStoreManager, partitionSupervisorFactory, synchronizer, scheduler);
    if (this.healthMonitor == null) {
        this.healthMonitor = new TraceHealthMonitor();
    }
    PartitionController partitionController2 = new HealthMonitoringPartitionControllerDecorator(partitionController, this.healthMonitor);
    PartitionLoadBalancer partitionLoadBalancer = new PartitionLoadBalancerImpl(
        partitionController2,
        leaseStoreManager,
        this.loadBalancingStrategy,
        this.changeFeedProcessorOptions.getLeaseAcquireInterval(),
        this.scheduler
    );
    // NOTE(review): the manager receives the *undecorated* controller while the load
    // balancer receives the health-monitoring decorator (partitionController2) —
    // confirm this asymmetry is intentional.
    PartitionManager partitionManager = new PartitionManagerImpl(bootstrapper, partitionController, partitionLoadBalancer);
    return Mono.just(partitionManager);
}
/**
 * Releases this processor by stopping it asynchronously.
 * <p>
 * Fix: this used to delegate unconditionally to {@code stop()}, which throws
 * {@link IllegalStateException} when the processor never fully started; a
 * {@code close()} should be safe to call in any state (AutoCloseable contract),
 * so we only stop a running instance.
 */
@Override
public void close() {
    if (this.isStarted()) {
        this.stop().subscribeOn(Schedulers.elastic()).subscribe();
    }
}
} |
I think we should fail fast (throw) in these cases instead of only logging, since warning logs may go unnoticed by the user. | public ChangeFeedProcessorBuilderImpl leaseContainer(CosmosAsyncContainer leaseClient) {
if (leaseClient == null) {
throw new IllegalArgumentException("leaseClient");
}
if (!getContextClient(leaseClient).isContentResponseOnWriteEnabled()) {
throw new IllegalArgumentException("leaseClient: content response on write setting must be enabled");
}
ConsistencyLevel consistencyLevel = getContextClient(leaseClient).getConsistencyLevel();
if (consistencyLevel == ConsistencyLevel.CONSISTENT_PREFIX || consistencyLevel == ConsistencyLevel.EVENTUAL) {
logger.warn("leaseClient consistency level setting are less then expected which is SESSION");
}
this.leaseContextClient = new ChangeFeedContextClientImpl(leaseClient);
return this;
} | logger.warn("leaseClient consistency level setting are less then expected which is SESSION"); | public ChangeFeedProcessorBuilderImpl leaseContainer(CosmosAsyncContainer leaseClient) {
if (leaseClient == null) {
throw new IllegalArgumentException("leaseClient");
}
if (!getContextClient(leaseClient).isContentResponseOnWriteEnabled()) {
throw new IllegalArgumentException("leaseClient: content response on write setting must be enabled");
}
ConsistencyLevel consistencyLevel = getContextClient(leaseClient).getConsistencyLevel();
if (consistencyLevel == ConsistencyLevel.CONSISTENT_PREFIX || consistencyLevel == ConsistencyLevel.EVENTUAL) {
logger.warn("leaseClient consistency level setting are less then expected which is SESSION");
}
this.leaseContextClient = new ChangeFeedContextClientImpl(leaseClient);
return this;
} | class ChangeFeedProcessorBuilderImpl implements ChangeFeedProcessor.BuilderDefinition, ChangeFeedProcessor, AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(ChangeFeedProcessorBuilderImpl.class);
private static final long DefaultUnhealthinessDuration = Duration.ofMinutes(15).toMillis();
private final Duration sleepTime = Duration.ofSeconds(15);
private final Duration lockTime = Duration.ofSeconds(30);
private static final int DefaultQueryPartitionsMaxBatchSize = 100;
private int queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
private int degreeOfParallelism = 25;
private String hostName;
private ChangeFeedContextClient feedContextClient;
private ChangeFeedProcessorOptions changeFeedProcessorOptions;
private ChangeFeedObserverFactory observerFactory;
private volatile String databaseResourceId;
private volatile String collectionResourceId;
private ChangeFeedContextClient leaseContextClient;
private PartitionLoadBalancingStrategy loadBalancingStrategy;
private PartitionProcessorFactory partitionProcessorFactory;
private LeaseStoreManager leaseStoreManager;
private HealthMonitor healthMonitor;
private volatile PartitionManager partitionManager;
private Scheduler scheduler;
/**
 * Begins listening to the change feed, building the processing pipeline on first use.
 *
 * @return a representation of the deferred computation of this call.
 */
@Override
public Mono<Void> start() {
    PartitionManager existingManager = this.partitionManager;
    if (existingManager != null) {
        // Pipeline already built — just start it again.
        return existingManager.start();
    }
    return this.initializeCollectionPropertiesForBuild()
        .flatMap(ignored -> this.getLeaseStoreManager().flatMap(this::buildPartitionManager))
        .flatMap(builtManager -> {
            this.partitionManager = builtManager;
            return this.partitionManager.start();
        });
}
/**
 * Stops listening for changes asynchronously.
 *
 * @return a representation of the deferred computation of this call.
 * @throws IllegalStateException if the processor never fully started.
 */
@Override
public Mono<Void> stop() {
    PartitionManager manager = this.partitionManager;
    if (manager != null && manager.isRunning()) {
        return manager.stop();
    }
    throw new IllegalStateException("The ChangeFeedProcessor instance has not fully started");
}
/**
 * Reports whether the processor is currently active.
 *
 * @return true if the change feed processor is currently active and running.
 */
@Override
public boolean isStarted() {
    PartitionManager manager = this.partitionManager;
    return manager != null && manager.isRunning();
}
/**
* Returns the current owner (host) and an approximation of the difference between the last processed item (defined
* by the state of the feed container) and the latest change in the container for each partition (lease
* document).
* <p>
* An empty map will be returned if the processor was not started or no lease documents matching the current
* {@link ChangeFeedProcessor} instance's lease prefix could be found.
*
* @return a map representing the current owner and lease token, the current LSN and latest LSN, and the estimated
* lag, asynchronously.
*/
@Override
public Mono<Map<String, Integer>> getEstimatedLag() {
Map<String, Integer> earlyResult = new ConcurrentHashMap<>();
if (this.leaseStoreManager == null || this.feedContextClient == null) {
return Mono.just(earlyResult);
}
return this.leaseStoreManager.getAllLeases()
.flatMap(lease -> {
ChangeFeedOptions options = new ChangeFeedOptions()
.setMaxItemCount(1)
.setPartitionKeyRangeId(lease.getLeaseToken())
.setStartFromBeginning(true)
.setRequestContinuation(lease.getContinuationToken());
return this.feedContextClient.createDocumentChangeFeedQuery(this.feedContextClient.getContainerClient(), options)
.take(1)
.map(feedResponse -> {
final String pkRangeIdSeparator = ":";
final String segmentSeparator = "
final String lsnPropertyName = "_lsn";
String ownerValue = lease.getOwner();
String sessionTokenLsn = feedResponse.getSessionToken();
String parsedSessionToken = sessionTokenLsn.substring(sessionTokenLsn.indexOf(pkRangeIdSeparator));
String[] segments = parsedSessionToken.split(segmentSeparator);
String latestLsn = segments[0];
if (segments.length >= 2) {
latestLsn = segments[1];
}
if (ownerValue == null) {
ownerValue = "";
}
if (feedResponse.getResults() == null || feedResponse.getResults().size() == 0) {
return Pair.of(ownerValue + "_" + lease.getLeaseToken(), 0);
}
Integer currentLsn = 0;
Integer estimatedLag = 0;
try {
currentLsn = Integer.valueOf(feedResponse.getResults().get(0).get(lsnPropertyName).asText("0"));
estimatedLag = Integer.valueOf(latestLsn);
estimatedLag = estimatedLag - currentLsn + 1;
} catch (NumberFormatException ex) {
logger.warn("Unexpected Cosmos LSN found", ex);
estimatedLag = -1;
}
return Pair.of(ownerValue + "_" + lease.getLeaseToken() + "_" + currentLsn + "_" + latestLsn, estimatedLag);
});
})
.collectList()
.map(valueList -> {
Map<String, Integer> result = new ConcurrentHashMap<>();
for (Pair<String, Integer> pair : valueList) {
result.put(pair.getKey(), pair.getValue());
}
return result;
});
}
/**
 * Sets the host name. When using multiple hosts, each host must have a unique name.
 *
 * @param hostName the name to be used for the host.
 * @return current Builder.
 * @throws IllegalArgumentException if {@code hostName} is null.
 */
@Override
public ChangeFeedProcessorBuilderImpl hostName(String hostName) {
    // Consistency with every other setter in this builder: reject null eagerly
    // instead of deferring to build()'s "Host name was not specified" failure.
    if (hostName == null) {
        throw new IllegalArgumentException("hostName");
    }
    this.hostName = hostName;
    return this;
}
/**
 * Sets and existing {@link CosmosAsyncContainer} to be used to read from the monitored collection.
 *
 * @param feedDocumentClient the instance of {@link CosmosAsyncContainer} to be used.
 * @return current Builder.
 * @throws IllegalArgumentException if {@code feedDocumentClient} is null.
 */
@Override
public ChangeFeedProcessorBuilderImpl feedContainer(CosmosAsyncContainer feedDocumentClient) {
    if (feedDocumentClient == null) {
        // Fix: the message previously said "feedContextClient" (an internal field),
        // not this method's parameter name.
        throw new IllegalArgumentException("feedDocumentClient");
    }
    this.feedContextClient = new ChangeFeedContextClientImpl(feedDocumentClient);
    return this;
}
/**
 * Configures the {@link ChangeFeedProcessorOptions} for this builder.
 *
 * @param changeFeedProcessorOptions the change feed processor options to use.
 * @return current Builder.
 */
@Override
public ChangeFeedProcessorBuilderImpl options(ChangeFeedProcessorOptions changeFeedProcessorOptions) {
    if (changeFeedProcessorOptions != null) {
        this.changeFeedProcessorOptions = changeFeedProcessorOptions;
        return this;
    }
    throw new IllegalArgumentException("changeFeedProcessorOptions");
}
/**
 * Registers the {@link ChangeFeedObserverFactory} used to create {@link ChangeFeedObserver} instances.
 *
 * @param observerFactory The instance of {@link ChangeFeedObserverFactory} to use.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl observerFactory(ChangeFeedObserverFactory observerFactory) {
    if (observerFactory != null) {
        this.observerFactory = observerFactory;
        return this;
    }
    throw new IllegalArgumentException("observerFactory");
}
/**
 * Registers a {@link ChangeFeedObserver} type; a factory is created around it to process changes.
 *
 * @param type the type of {@link ChangeFeedObserver} to be used.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl observer(Class<? extends ChangeFeedObserver> type) {
    if (type != null) {
        this.observerFactory = new ChangeFeedObserverFactoryImpl(type);
        return this;
    }
    throw new IllegalArgumentException("type");
}
@Override
public ChangeFeedProcessorBuilderImpl handleChanges(Consumer<List<JsonNode>> consumer) {
    // Wrap the consumer in the default factory and register it.
    DefaultObserverFactory defaultFactory = new DefaultObserverFactory(consumer);
    return this.observerFactory(defaultFactory);
}
/**
 * Sets the database resource ID of the monitored collection.
 *
 * @param databaseResourceId the database resource ID of the monitored collection.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withDatabaseResourceId(String databaseResourceId) {
    // Pre-seeds the cached id; otherwise resolved from the service in
    // initializeCollectionPropertiesForBuild(). No null check — presumably
    // intentional for internal use; TODO confirm.
    this.databaseResourceId = databaseResourceId;
    return this;
}
/**
 * Sets the collection resource ID of the monitored collection.
 * @param collectionResourceId the collection resource ID of the monitored collection.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withCollectionResourceId(String collectionResourceId) {
    // Pre-seeds the cached id; otherwise resolved from the service in
    // initializeCollectionPropertiesForBuild(). No null check — presumably
    // intentional for internal use; TODO confirm.
    this.collectionResourceId = collectionResourceId;
    return this;
}
/**
* Sets an existing {@link CosmosAsyncContainer} to be used to read from the leases collection.
*
* @param leaseClient the instance of {@link CosmosAsyncContainer} to use.
* @return current Builder.
*/
@Override
/**
* Sets the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
*
* @param loadBalancingStrategy the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionLoadBalancingStrategy(PartitionLoadBalancingStrategy loadBalancingStrategy) {
    // Overrides the default EqualPartitionsBalancingStrategy chosen at build time.
    if (loadBalancingStrategy != null) {
        this.loadBalancingStrategy = loadBalancingStrategy;
        return this;
    }
    throw new IllegalArgumentException("loadBalancingStrategy");
}
/**
 * Registers the {@link PartitionProcessorFactory} used to create {@link PartitionProcessor} instances.
 *
 * @param partitionProcessorFactory the instance of {@link PartitionProcessorFactory} to use.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withPartitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) {
    if (partitionProcessorFactory != null) {
        this.partitionProcessorFactory = partitionProcessorFactory;
        return this;
    }
    throw new IllegalArgumentException("partitionProcessorFactory");
}
/**
 * Registers the {@link LeaseStoreManager} used to manage leases.
 *
 * @param leaseStoreManager the instance of {@link LeaseStoreManager} to use.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withLeaseStoreManager(LeaseStoreManager leaseStoreManager) {
    if (leaseStoreManager != null) {
        this.leaseStoreManager = leaseStoreManager;
        return this;
    }
    throw new IllegalArgumentException("leaseStoreManager");
}
/**
 * Registers the {@link HealthMonitor} that will be notified of unhealthy conditions.
 *
 * @param healthMonitor The instance of {@link HealthMonitor} to use.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withHealthMonitor(HealthMonitor healthMonitor) {
    if (healthMonitor != null) {
        this.healthMonitor = healthMonitor;
        return this;
    }
    throw new IllegalArgumentException("healthMonitor");
}
/**
 * Validates required settings and finalizes this instance as the {@link ChangeFeedProcessor}.
 *
 * @return an instance of {@link ChangeFeedProcessor}.
 */
@Override
public ChangeFeedProcessor build() {
    if (null == this.hostName) {
        throw new IllegalArgumentException("Host name was not specified");
    }
    if (null == this.observerFactory) {
        throw new IllegalArgumentException("Observer was not specified");
    }
    // Fall back to an elastic scheduler when the caller did not supply one.
    this.scheduler = (this.scheduler != null) ? this.scheduler : Schedulers.elastic();
    return this;
}
/** Creates a builder with the default partition-query batch size and degree of parallelism. */
public ChangeFeedProcessorBuilderImpl() {
    // Values duplicate the field initializers; kept explicit here.
    this.queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
    this.degreeOfParallelism = 25;
}
/**
 * Creates a builder around an already-constructed {@link PartitionManager};
 * presumably for internal/test use — TODO confirm with callers.
 */
public ChangeFeedProcessorBuilderImpl(PartitionManager partitionManager) {
    this.partitionManager = partitionManager;
}
// Resolves and caches the monitored database and container ids before the partition
// manager is built; also defaults the processor options when the caller set none.
private Mono<ChangeFeedProcessor> initializeCollectionPropertiesForBuild() {
    if (this.changeFeedProcessorOptions == null) {
        this.changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
    }
    return this.feedContextClient
        .readDatabase(this.feedContextClient.getDatabaseClient(), null)
        .map( databaseResourceResponse -> {
            // Cache the database id; used later by getLeasePrefix().
            this.databaseResourceId = databaseResourceResponse.getProperties().getId();
            return this.databaseResourceId;
        })
        .flatMap( id -> this.feedContextClient
            .readContainer(this.feedContextClient.getContainerClient(), null)
            .map(documentCollectionResourceResponse -> {
                // Cache the container id as well; the builder itself is the emitted value.
                this.collectionResourceId = documentCollectionResourceResponse.getProperties().getId();
                return this;
            }));
}
// Lazily builds a LeaseStoreManager over the lease container the first time it is
// needed; subsequent calls reuse the cached instance.
private Mono<LeaseStoreManager> getLeaseStoreManager() {
    if (this.leaseStoreManager == null) {
        return this.leaseContextClient.readContainerSettings(this.leaseContextClient.getContainerClient(), null)
            .flatMap( collectionSettings -> {
                // The lease container must be partitioned with "/id" as its only partition key path.
                boolean isPartitioned =
                    collectionSettings.getPartitionKeyDefinition() != null &&
                    collectionSettings.getPartitionKeyDefinition().getPaths() != null &&
                    collectionSettings.getPartitionKeyDefinition().getPaths().size() > 0;
                if (!isPartitioned || (collectionSettings.getPartitionKeyDefinition().getPaths().size() != 1 || !collectionSettings.getPartitionKeyDefinition().getPaths().get(0).equals("/id"))) {
                    return Mono.error(new IllegalArgumentException("The lease collection must have partition key equal to id."));
                }
                RequestOptionsFactory requestOptionsFactory = new PartitionedByIdCollectionRequestOptionsFactory();
                // Lease documents are namespaced per endpoint/database/container (plus optional user prefix).
                String leasePrefix = this.getLeasePrefix();
                return LeaseStoreManager.builder()
                    .leasePrefix(leasePrefix)
                    .leaseCollectionLink(this.leaseContextClient.getContainerClient())
                    .leaseContextClient(this.leaseContextClient)
                    .requestOptionsFactory(requestOptionsFactory)
                    .hostName(this.hostName)
                    .build()
                    .map(manager -> {
                        // Cache for reuse (e.g. by getEstimatedLag()).
                        this.leaseStoreManager = manager;
                        return this.leaseStoreManager;
                    });
            });
    }
    return Mono.just(this.leaseStoreManager);
}
/**
 * Computes the lease-document name prefix: the user-configured prefix (if any)
 * followed by the service host, database id, and container id.
 */
private String getLeasePrefix() {
    String configuredPrefix = this.changeFeedProcessorOptions.getLeasePrefix();
    String prefix = (configuredPrefix != null) ? configuredPrefix : "";
    URI serviceEndpoint = this.feedContextClient.getServiceEndpoint();
    return prefix + serviceEndpoint.getHost()
        + "_" + this.databaseResourceId
        + "_" + this.collectionResourceId;
}
// Wires the full partition-processing pipeline: synchronizer, bootstrapper,
// supervisor factory, controller (health-monitored), load balancer, and finally
// the PartitionManager that drives them all.
private Mono<PartitionManager> buildPartitionManager(LeaseStoreManager leaseStoreManager) {
    CheckpointerObserverFactory factory = new CheckpointerObserverFactory(this.observerFactory, new CheckpointFrequency());
    PartitionSynchronizerImpl synchronizer = new PartitionSynchronizerImpl(
        this.feedContextClient,
        this.feedContextClient.getContainerClient(),
        leaseStoreManager,
        leaseStoreManager,
        this.degreeOfParallelism,
        this.queryPartitionsMaxBatchSize
    );
    Bootstrapper bootstrapper = new BootstrapperImpl(synchronizer, leaseStoreManager, this.lockTime, this.sleepTime);
    PartitionSupervisorFactory partitionSupervisorFactory = new PartitionSupervisorFactoryImpl(
        factory,
        leaseStoreManager,
        // Caller-supplied processor factory wins; otherwise use the default implementation.
        this.partitionProcessorFactory != null ? this.partitionProcessorFactory : new PartitionProcessorFactoryImpl(
            this.feedContextClient,
            this.changeFeedProcessorOptions,
            leaseStoreManager,
            this.feedContextClient.getContainerClient()),
        this.changeFeedProcessorOptions,
        this.scheduler
    );
    if (this.loadBalancingStrategy == null) {
        // Default strategy: spread partitions evenly across hosts.
        this.loadBalancingStrategy = new EqualPartitionsBalancingStrategy(
            this.hostName,
            this.changeFeedProcessorOptions.getMinScaleCount(),
            this.changeFeedProcessorOptions.getMaxScaleCount(),
            this.changeFeedProcessorOptions.getLeaseExpirationInterval());
    }
    // NOTE(review): `scheduler` is used here while `this.scheduler` is used below — same field, but inconsistent style.
    PartitionController partitionController = new PartitionControllerImpl(leaseStoreManager, leaseStoreManager, partitionSupervisorFactory, synchronizer, scheduler);
    if (this.healthMonitor == null) {
        this.healthMonitor = new TraceHealthMonitor();
    }
    PartitionController partitionController2 = new HealthMonitoringPartitionControllerDecorator(partitionController, this.healthMonitor);
    PartitionLoadBalancer partitionLoadBalancer = new PartitionLoadBalancerImpl(
        partitionController2,
        leaseStoreManager,
        this.loadBalancingStrategy,
        this.changeFeedProcessorOptions.getLeaseAcquireInterval(),
        this.scheduler
    );
    // NOTE(review): the manager receives the *undecorated* controller while the load
    // balancer receives the health-monitoring decorator (partitionController2) —
    // confirm this asymmetry is intentional.
    PartitionManager partitionManager = new PartitionManagerImpl(bootstrapper, partitionController, partitionLoadBalancer);
    return Mono.just(partitionManager);
}
/**
 * Releases this processor by stopping it asynchronously.
 * <p>
 * Fix: this used to delegate unconditionally to {@code stop()}, which throws
 * {@link IllegalStateException} when the processor never fully started; a
 * {@code close()} should be safe to call in any state (AutoCloseable contract),
 * so we only stop a running instance.
 */
@Override
public void close() {
    if (this.isStarted()) {
        this.stop().subscribeOn(Schedulers.elastic()).subscribe();
    }
}
} | class ChangeFeedProcessorBuilderImpl implements ChangeFeedProcessor.BuilderDefinition, ChangeFeedProcessor, AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(ChangeFeedProcessorBuilderImpl.class);
private static final long DefaultUnhealthinessDuration = Duration.ofMinutes(15).toMillis();
private final Duration sleepTime = Duration.ofSeconds(15);
private final Duration lockTime = Duration.ofSeconds(30);
private static final int DefaultQueryPartitionsMaxBatchSize = 100;
private int queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
private int degreeOfParallelism = 25;
private String hostName;
private ChangeFeedContextClient feedContextClient;
private ChangeFeedProcessorOptions changeFeedProcessorOptions;
private ChangeFeedObserverFactory observerFactory;
private volatile String databaseResourceId;
private volatile String collectionResourceId;
private ChangeFeedContextClient leaseContextClient;
private PartitionLoadBalancingStrategy loadBalancingStrategy;
private PartitionProcessorFactory partitionProcessorFactory;
private LeaseStoreManager leaseStoreManager;
private HealthMonitor healthMonitor;
private volatile PartitionManager partitionManager;
private Scheduler scheduler;
/**
 * Begins listening to the change feed, building the processing pipeline on first use.
 *
 * @return a representation of the deferred computation of this call.
 */
@Override
public Mono<Void> start() {
    PartitionManager existingManager = this.partitionManager;
    if (existingManager != null) {
        // Pipeline already built — just start it again.
        return existingManager.start();
    }
    return this.initializeCollectionPropertiesForBuild()
        .flatMap(ignored -> this.getLeaseStoreManager().flatMap(this::buildPartitionManager))
        .flatMap(builtManager -> {
            this.partitionManager = builtManager;
            return this.partitionManager.start();
        });
}
/**
 * Stops listening for changes asynchronously.
 *
 * @return a representation of the deferred computation of this call.
 * @throws IllegalStateException if the processor never fully started.
 */
@Override
public Mono<Void> stop() {
    PartitionManager manager = this.partitionManager;
    if (manager != null && manager.isRunning()) {
        return manager.stop();
    }
    throw new IllegalStateException("The ChangeFeedProcessor instance has not fully started");
}
/**
 * Reports whether the processor is currently active.
 *
 * @return true if the change feed processor is currently active and running.
 */
@Override
public boolean isStarted() {
    PartitionManager manager = this.partitionManager;
    return manager != null && manager.isRunning();
}
/**
* Returns the current owner (host) and an approximation of the difference between the last processed item (defined
* by the state of the feed container) and the latest change in the container for each partition (lease
* document).
* <p>
* An empty map will be returned if the processor was not started or no lease documents matching the current
* {@link ChangeFeedProcessor} instance's lease prefix could be found.
*
* @return a map representing the current owner and lease token, the current LSN and latest LSN, and the estimated
* lag, asynchronously.
*/
@Override
public Mono<Map<String, Integer>> getEstimatedLag() {
Map<String, Integer> earlyResult = new ConcurrentHashMap<>();
if (this.leaseStoreManager == null || this.feedContextClient == null) {
return Mono.just(earlyResult);
}
return this.leaseStoreManager.getAllLeases()
.flatMap(lease -> {
ChangeFeedOptions options = new ChangeFeedOptions()
.setMaxItemCount(1)
.setPartitionKeyRangeId(lease.getLeaseToken())
.setStartFromBeginning(true)
.setRequestContinuation(lease.getContinuationToken());
return this.feedContextClient.createDocumentChangeFeedQuery(this.feedContextClient.getContainerClient(), options)
.take(1)
.map(feedResponse -> {
final String pkRangeIdSeparator = ":";
final String segmentSeparator = "
final String lsnPropertyName = "_lsn";
String ownerValue = lease.getOwner();
String sessionTokenLsn = feedResponse.getSessionToken();
String parsedSessionToken = sessionTokenLsn.substring(sessionTokenLsn.indexOf(pkRangeIdSeparator));
String[] segments = parsedSessionToken.split(segmentSeparator);
String latestLsn = segments[0];
if (segments.length >= 2) {
latestLsn = segments[1];
}
if (ownerValue == null) {
ownerValue = "";
}
if (feedResponse.getResults() == null || feedResponse.getResults().size() == 0) {
return Pair.of(ownerValue + "_" + lease.getLeaseToken(), 0);
}
Integer currentLsn = 0;
Integer estimatedLag = 0;
try {
currentLsn = Integer.valueOf(feedResponse.getResults().get(0).get(lsnPropertyName).asText("0"));
estimatedLag = Integer.valueOf(latestLsn);
estimatedLag = estimatedLag - currentLsn + 1;
} catch (NumberFormatException ex) {
logger.warn("Unexpected Cosmos LSN found", ex);
estimatedLag = -1;
}
return Pair.of(ownerValue + "_" + lease.getLeaseToken() + "_" + currentLsn + "_" + latestLsn, estimatedLag);
});
})
.collectList()
.map(valueList -> {
Map<String, Integer> result = new ConcurrentHashMap<>();
for (Pair<String, Integer> pair : valueList) {
result.put(pair.getKey(), pair.getValue());
}
return result;
});
}
/**
 * Sets the host name. When using multiple hosts, each host must have a unique name.
 *
 * @param hostName the name to be used for the host.
 * @return current Builder.
 * @throws IllegalArgumentException if {@code hostName} is null.
 */
@Override
public ChangeFeedProcessorBuilderImpl hostName(String hostName) {
    // Consistency with every other setter in this builder: reject null eagerly
    // instead of deferring to build()'s "Host name was not specified" failure.
    if (hostName == null) {
        throw new IllegalArgumentException("hostName");
    }
    this.hostName = hostName;
    return this;
}
/**
 * Sets and existing {@link CosmosAsyncContainer} to be used to read from the monitored collection.
 *
 * @param feedDocumentClient the instance of {@link CosmosAsyncContainer} to be used.
 * @return current Builder.
 * @throws IllegalArgumentException if {@code feedDocumentClient} is null.
 */
@Override
public ChangeFeedProcessorBuilderImpl feedContainer(CosmosAsyncContainer feedDocumentClient) {
    if (feedDocumentClient == null) {
        // Fix: the message previously said "feedContextClient" (an internal field),
        // not this method's parameter name.
        throw new IllegalArgumentException("feedDocumentClient");
    }
    this.feedContextClient = new ChangeFeedContextClientImpl(feedDocumentClient);
    return this;
}
/**
 * Configures the {@link ChangeFeedProcessorOptions} for this builder.
 *
 * @param changeFeedProcessorOptions the change feed processor options to use.
 * @return current Builder.
 */
@Override
public ChangeFeedProcessorBuilderImpl options(ChangeFeedProcessorOptions changeFeedProcessorOptions) {
    if (changeFeedProcessorOptions != null) {
        this.changeFeedProcessorOptions = changeFeedProcessorOptions;
        return this;
    }
    throw new IllegalArgumentException("changeFeedProcessorOptions");
}
/**
 * Registers the {@link ChangeFeedObserverFactory} used to create {@link ChangeFeedObserver} instances.
 *
 * @param observerFactory The instance of {@link ChangeFeedObserverFactory} to use.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl observerFactory(ChangeFeedObserverFactory observerFactory) {
    if (observerFactory != null) {
        this.observerFactory = observerFactory;
        return this;
    }
    throw new IllegalArgumentException("observerFactory");
}
/**
 * Registers a {@link ChangeFeedObserver} type; a factory is created around it to process changes.
 *
 * @param type the type of {@link ChangeFeedObserver} to be used.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl observer(Class<? extends ChangeFeedObserver> type) {
    if (type != null) {
        this.observerFactory = new ChangeFeedObserverFactoryImpl(type);
        return this;
    }
    throw new IllegalArgumentException("type");
}
@Override
public ChangeFeedProcessorBuilderImpl handleChanges(Consumer<List<JsonNode>> consumer) {
    // Wrap the consumer in the default factory and register it.
    DefaultObserverFactory defaultFactory = new DefaultObserverFactory(consumer);
    return this.observerFactory(defaultFactory);
}
/**
 * Sets the database resource ID of the monitored collection.
 *
 * @param databaseResourceId the database resource ID of the monitored collection.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withDatabaseResourceId(String databaseResourceId) {
    // Pre-seeds the cached id; otherwise resolved from the service in
    // initializeCollectionPropertiesForBuild(). No null check — presumably
    // intentional for internal use; TODO confirm.
    this.databaseResourceId = databaseResourceId;
    return this;
}
/**
 * Sets the collection resource ID of the monitored collection.
 * @param collectionResourceId the collection resource ID of the monitored collection.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withCollectionResourceId(String collectionResourceId) {
    // Pre-seeds the cached id; otherwise resolved from the service in
    // initializeCollectionPropertiesForBuild(). No null check — presumably
    // intentional for internal use; TODO confirm.
    this.collectionResourceId = collectionResourceId;
    return this;
}
/**
* Sets an existing {@link CosmosAsyncContainer} to be used to read from the leases collection.
*
* @param leaseClient the instance of {@link CosmosAsyncContainer} to use.
* @return current Builder.
*/
@Override
/**
* Sets the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
*
* @param loadBalancingStrategy the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionLoadBalancingStrategy(PartitionLoadBalancingStrategy loadBalancingStrategy) {
    // Overrides the default EqualPartitionsBalancingStrategy chosen at build time.
    if (loadBalancingStrategy != null) {
        this.loadBalancingStrategy = loadBalancingStrategy;
        return this;
    }
    throw new IllegalArgumentException("loadBalancingStrategy");
}
/**
 * Registers the {@link PartitionProcessorFactory} used to create {@link PartitionProcessor} instances.
 *
 * @param partitionProcessorFactory the instance of {@link PartitionProcessorFactory} to use.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withPartitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) {
    if (partitionProcessorFactory != null) {
        this.partitionProcessorFactory = partitionProcessorFactory;
        return this;
    }
    throw new IllegalArgumentException("partitionProcessorFactory");
}
/**
 * Registers the {@link LeaseStoreManager} used to manage leases.
 *
 * @param leaseStoreManager the instance of {@link LeaseStoreManager} to use.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withLeaseStoreManager(LeaseStoreManager leaseStoreManager) {
    if (leaseStoreManager != null) {
        this.leaseStoreManager = leaseStoreManager;
        return this;
    }
    throw new IllegalArgumentException("leaseStoreManager");
}
/**
 * Registers the {@link HealthMonitor} that will be notified of unhealthy conditions.
 *
 * @param healthMonitor The instance of {@link HealthMonitor} to use.
 * @return current Builder.
 */
public ChangeFeedProcessorBuilderImpl withHealthMonitor(HealthMonitor healthMonitor) {
    if (healthMonitor != null) {
        this.healthMonitor = healthMonitor;
        return this;
    }
    throw new IllegalArgumentException("healthMonitor");
}
/**
 * Validates required settings and finalizes this instance as the {@link ChangeFeedProcessor}.
 *
 * @return an instance of {@link ChangeFeedProcessor}.
 */
@Override
public ChangeFeedProcessor build() {
    if (null == this.hostName) {
        throw new IllegalArgumentException("Host name was not specified");
    }
    if (null == this.observerFactory) {
        throw new IllegalArgumentException("Observer was not specified");
    }
    // Fall back to an elastic scheduler when the caller did not supply one.
    this.scheduler = (this.scheduler != null) ? this.scheduler : Schedulers.elastic();
    return this;
}
/** Creates a builder with the default partition-query batch size and degree of parallelism. */
public ChangeFeedProcessorBuilderImpl() {
    // Values duplicate the field initializers; kept explicit here.
    this.queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
    this.degreeOfParallelism = 25;
}
/**
 * Creates a builder around an already-constructed {@link PartitionManager};
 * presumably for internal/test use — TODO confirm with callers.
 */
public ChangeFeedProcessorBuilderImpl(PartitionManager partitionManager) {
    this.partitionManager = partitionManager;
}
// Resolves and caches the monitored database and container ids before the partition
// manager is built; also defaults the processor options when the caller set none.
private Mono<ChangeFeedProcessor> initializeCollectionPropertiesForBuild() {
    if (this.changeFeedProcessorOptions == null) {
        this.changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
    }
    return this.feedContextClient
        .readDatabase(this.feedContextClient.getDatabaseClient(), null)
        .map( databaseResourceResponse -> {
            // Cache the database id; used later by getLeasePrefix().
            this.databaseResourceId = databaseResourceResponse.getProperties().getId();
            return this.databaseResourceId;
        })
        .flatMap( id -> this.feedContextClient
            .readContainer(this.feedContextClient.getContainerClient(), null)
            .map(documentCollectionResourceResponse -> {
                // Cache the container id as well; the builder itself is the emitted value.
                this.collectionResourceId = documentCollectionResourceResponse.getProperties().getId();
                return this;
            }));
}
/**
 * Returns the configured {@link LeaseStoreManager}, building and caching one on
 * first use. The lease container is validated to be partitioned on exactly "/id";
 * any other scheme is rejected because lease documents are addressed by id.
 *
 * NOTE(review): the lazy initialization writes this.leaseStoreManager from the
 * reactive pipeline without synchronization — presumably build() is single-threaded;
 * confirm concurrent subscription cannot occur.
 *
 * @return a {@link Mono} emitting the (possibly newly built) lease store manager.
 */
private Mono<LeaseStoreManager> getLeaseStoreManager() {
if (this.leaseStoreManager == null) {
return this.leaseContextClient.readContainerSettings(this.leaseContextClient.getContainerClient(), null)
.flatMap( collectionSettings -> {
// A container counts as partitioned only when a partition key definition
// with at least one path exists.
boolean isPartitioned =
collectionSettings.getPartitionKeyDefinition() != null &&
collectionSettings.getPartitionKeyDefinition().getPaths() != null &&
collectionSettings.getPartitionKeyDefinition().getPaths().size() > 0;
// Require exactly one partition key path and that it is "/id".
if (!isPartitioned || (collectionSettings.getPartitionKeyDefinition().getPaths().size() != 1 || !collectionSettings.getPartitionKeyDefinition().getPaths().get(0).equals("/id"))) {
return Mono.error(new IllegalArgumentException("The lease collection must have partition key equal to id."));
}
RequestOptionsFactory requestOptionsFactory = new PartitionedByIdCollectionRequestOptionsFactory();
String leasePrefix = this.getLeasePrefix();
return LeaseStoreManager.builder()
.leasePrefix(leasePrefix)
.leaseCollectionLink(this.leaseContextClient.getContainerClient())
.leaseContextClient(this.leaseContextClient)
.requestOptionsFactory(requestOptionsFactory)
.hostName(this.hostName)
.build()
.map(manager -> {
// Cache for subsequent calls.
this.leaseStoreManager = manager;
return this.leaseStoreManager;
});
});
}
return Mono.just(this.leaseStoreManager);
}
/**
 * Builds the lease-document prefix: the user-configured prefix (if any) followed
 * by "host_databaseId_collectionId", scoping leases to this monitored feed.
 *
 * @return the prefix applied to lease documents.
 */
private String getLeasePrefix() {
    String configured = this.changeFeedProcessorOptions.getLeasePrefix();
    String userPrefix = configured == null ? "" : configured;
    String host = this.feedContextClient.getServiceEndpoint().getHost();
    return userPrefix + host + "_" + this.databaseResourceId + "_" + this.collectionResourceId;
}
/**
 * Wires together the partition-processing pipeline: synchronizer, bootstrapper,
 * supervisor factory, controller (wrapped in a health-monitoring decorator),
 * load balancer, and finally the {@link PartitionManager}.
 *
 * @param leaseStoreManager the lease store used for lease CRUD, checkpointing and containment.
 * @return a {@link Mono} emitting the assembled partition manager.
 */
private Mono<PartitionManager> buildPartitionManager(LeaseStoreManager leaseStoreManager) {
CheckpointerObserverFactory factory = new CheckpointerObserverFactory(this.observerFactory, new CheckpointFrequency());
PartitionSynchronizerImpl synchronizer = new PartitionSynchronizerImpl(
this.feedContextClient,
this.feedContextClient.getContainerClient(),
leaseStoreManager,
leaseStoreManager,
this.degreeOfParallelism,
this.queryPartitionsMaxBatchSize
);
Bootstrapper bootstrapper = new BootstrapperImpl(synchronizer, leaseStoreManager, this.lockTime, this.sleepTime);
PartitionSupervisorFactory partitionSupervisorFactory = new PartitionSupervisorFactoryImpl(
factory,
leaseStoreManager,
// Caller-supplied processor factory wins; otherwise use the default implementation.
this.partitionProcessorFactory != null ? this.partitionProcessorFactory : new PartitionProcessorFactoryImpl(
this.feedContextClient,
this.changeFeedProcessorOptions,
leaseStoreManager,
this.feedContextClient.getContainerClient()),
this.changeFeedProcessorOptions,
this.scheduler
);
if (this.loadBalancingStrategy == null) {
// Default strategy: spread partitions evenly across hosts.
this.loadBalancingStrategy = new EqualPartitionsBalancingStrategy(
this.hostName,
this.changeFeedProcessorOptions.getMinScaleCount(),
this.changeFeedProcessorOptions.getMaxScaleCount(),
this.changeFeedProcessorOptions.getLeaseExpirationInterval());
}
PartitionController partitionController = new PartitionControllerImpl(leaseStoreManager, leaseStoreManager, partitionSupervisorFactory, synchronizer, scheduler);
if (this.healthMonitor == null) {
this.healthMonitor = new TraceHealthMonitor();
}
PartitionController partitionController2 = new HealthMonitoringPartitionControllerDecorator(partitionController, this.healthMonitor);
// The load balancer drives the decorated (health-monitored) controller.
PartitionLoadBalancer partitionLoadBalancer = new PartitionLoadBalancerImpl(
partitionController2,
leaseStoreManager,
this.loadBalancingStrategy,
this.changeFeedProcessorOptions.getLeaseAcquireInterval(),
this.scheduler
);
// NOTE(review): PartitionManagerImpl receives the UNDECORATED partitionController,
// so lease additions routed through the manager bypass health monitoring —
// confirm whether partitionController2 was intended here.
PartitionManager partitionManager = new PartitionManagerImpl(bootstrapper, partitionController, partitionLoadBalancer);
return Mono.just(partitionManager);
}
/**
 * Stops processing asynchronously (fire-and-forget): the stop signal is
 * subscribed on an elastic scheduler and this method returns immediately,
 * without waiting for shutdown to complete.
 */
@Override
public void close() {
this.stop().subscribeOn(Schedulers.elastic()).subscribe();
}
} |
this is called `setContainerName`, but the test's `.setContainerName("AQAAAJ0fgTc=")` passes a portion of the self-link. So is this the container name, or just a portion of the self-link? | public void createPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("AQAAAJ0fgTc=")
.setResourcePath(CosmosContainerChildResourceKind.ITEM, "doc1");
Mono<CosmosPermissionResponse> createObservable = createdUser.createPermission(permissionSettings, null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("AQAAAJ0fgTc=")
.withPermissionResourceKind(CosmosContainerChildResourceKind.ITEM)
.withPermissionResourceName("doc1")
.notNullEtag()
.build();
validateSuccess(createObservable, validator);
} | .setContainerName("AQAAAJ0fgTc=") | public void createPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer")
.setResourcePath(ContainerChildResourceType.ITEM, "doc1");
Mono<CosmosPermissionResponse> createObservable = createdUser.createPermission(permissionSettings, null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.withPermissionResourceKind(ContainerChildResourceType.ITEM)
.withPermissionResourceName("doc1")
.notNullEtag()
.build();
validateSuccess(createObservable, validator);
} | class PermissionCrudTest extends TestSuiteBase {
// Database created once per class run; all test permissions live under it.
private CosmosAsyncDatabase createdDatabase;
// User that owns the permissions created by each test.
private CosmosAsyncUser createdUser;
// Unique database id so concurrent test runs do not collide.
private final String databaseId = CosmosDatabaseForTest.generateId();
// Async client built from the injected builder in the @BeforeClass setup.
private CosmosAsyncClient client;
/** Runs the suite once per client configuration supplied by the "clientBuilders" data provider. */
@Factory(dataProvider = "clientBuilders")
public PermissionCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
/** Creates a READ permission, reads it back, and verifies the round-trip preserved its fields. */
// Fix: the duplicated @Test annotation (a compile error — @Test is not repeatable) is removed.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readPermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    // Use a plain container name; the old value ("AQAAAJ0fgTc=") was a resource-id
    // fragment lifted from a self-link, which obscured what setContainerName expects.
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(UUID.randomUUID().toString())
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    createdUser.createPermission(permissionSettings, null).block();
    Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(permissionSettings.getId())
        .withPermissionMode(PermissionMode.READ)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(readObservable, validator);
}
/** Creates a permission, deletes it, and verifies a subsequent read fails with resource-not-found. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deletePermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    // "myContainer" replaces the old resource-id fragment ("AQAAAJ0fgTc=");
    // setContainerName expects a container name, not a self-link piece.
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(UUID.randomUUID().toString())
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    createdUser.createPermission(permissionSettings, null).block();
    CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
    Mono<CosmosPermissionResponse> deleteObservable = readBackPermission.delete(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .nullResource()
        .build();
    validateSuccess(deleteObservable, validator);
    // Let replicas observe the delete before asserting not-found.
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
    FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build();
    validateFailure(readObservable, notFoundValidator);
}
/** Verifies create-then-upsert: the upsert updates the permission mode from READ to ALL. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void upsertPermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    // "myContainer" replaces the old resource-id fragment ("AQAAAJ0fgTc=").
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(UUID.randomUUID().toString())
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null)
        .block();
    CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
    Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(readBackPermissionProperties.getId())
        .withPermissionMode(PermissionMode.READ)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(readObservable, validator);
    // Flip the mode and upsert; the response must reflect the new mode.
    readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
    Mono<CosmosPermissionResponse> updateObservable = createdUser.upsertPermission(readBackPermissionProperties, null);
    CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(readBackPermissionProperties.getId())
        .withPermissionMode(PermissionMode.ALL)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(updateObservable, validatorForUpdate);
}
/** Verifies create-then-replace: the replace updates the permission mode from READ to ALL. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replacePermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    String id = UUID.randomUUID().toString();
    // "myContainer" replaces the old resource-id fragment ("AQAAAJ0fgTc=").
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(id)
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null).block();
    CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
    Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(readBackPermissionResponse.getProperties().getId())
        .withPermissionMode(PermissionMode.READ)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(readObservable, validator);
    // Replace with an updated mode; the response must reflect the change.
    CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
    readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
    Mono<CosmosPermissionResponse> updateObservable = readBackPermission.replace(readBackPermissionProperties, null);
    CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(readBackPermissionProperties.getId())
        .withPermissionMode(PermissionMode.ALL)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(updateObservable, validatorForUpdate);
}
/** Builds the async client and creates the test database before any test runs. */
@BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
public void before_PermissionCrudTest() {
client = getClientBuilder().buildAsyncClient();
createdDatabase = createDatabase(client, databaseId);
}
/** Releases the client after all tests; alwaysRun ensures cleanup even when tests fail. */
@AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
/** @return a user definition with a fresh random unique id. */
private static CosmosUserProperties getUserDefinition() {
    String userId = UUID.randomUUID().toString();
    return new CosmosUserProperties().setId(userId);
}
} | class PermissionCrudTest extends TestSuiteBase {
// Database created once per class run; all test permissions live under it.
private CosmosAsyncDatabase createdDatabase;
// User that owns the permissions created by each test.
private CosmosAsyncUser createdUser;
// Unique database id so concurrent test runs do not collide.
private final String databaseId = CosmosDatabaseForTest.generateId();
// Async client built from the injected builder in the @BeforeClass setup.
private CosmosAsyncClient client;
/** Runs the suite once per client configuration supplied by the "clientBuilders" data provider. */
@Factory(dataProvider = "clientBuilders")
public PermissionCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
/** Creates a READ permission, reads it back, and verifies the round-trip preserved its fields. */
// Fix: the duplicated @Test annotation (a compile error — @Test is not repeatable) is removed.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readPermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(UUID.randomUUID().toString())
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    createdUser.createPermission(permissionSettings, null).block();
    Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(permissionSettings.getId())
        .withPermissionMode(PermissionMode.READ)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(readObservable, validator);
}
/** Creates a permission, deletes it, and verifies a subsequent read fails with resource-not-found. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deletePermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
createdUser.createPermission(permissionSettings, null).block();
CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
Mono<CosmosPermissionResponse> deleteObservable = readBackPermission.delete(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.nullResource()
.build();
validateSuccess(deleteObservable, validator);
// Let replicas observe the delete before asserting not-found.
waitIfNeededForReplicasToCatchUp(getClientBuilder());
Mono<CosmosPermissionResponse> readObservable = readBackPermission.read( null);
FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build();
validateFailure(readObservable, notFoundValidator);
}
/** Verifies create-then-upsert: the upsert updates the permission mode from READ to ALL. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void upsertPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null)
.block();
CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read( null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
// Flip the mode and upsert; the response must reflect the new mode.
readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
Mono<CosmosPermissionResponse> updateObservable = createdUser.upsertPermission(readBackPermissionProperties, null);
CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.ALL)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(updateObservable, validatorForUpdate);
}
/** Verifies create-then-replace: the replace updates the permission mode from READ to ALL. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replacePermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
String id = UUID.randomUUID().toString();
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(id)
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null).block();
CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionResponse.getProperties().getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
// Replace with an updated mode; the response must reflect the change.
CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
Mono<CosmosPermissionResponse> updateObservable = readBackPermission.replace(readBackPermissionProperties, null);
CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.ALL)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(updateObservable, validatorForUpdate);
}
/** Builds the async client and creates the test database before any test runs. */
@BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
public void before_PermissionCrudTest() {
client = getClientBuilder().buildAsyncClient();
createdDatabase = createDatabase(client, databaseId);
}
/** Releases the client after all tests; alwaysRun ensures cleanup even when tests fail. */
@AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
/** @return a user definition with a fresh random unique id. */
private static CosmosUserProperties getUserDefinition() {
    String userId = UUID.randomUUID().toString();
    return new CosmosUserProperties().setId(userId);
}
} |
this is called `withPermissionContainerName`, but the test's `.withPermissionContainerName("AQAAAJ0fgTc=")` uses a portion of the self-link. So is this the container name, or just a portion of the self-link? (same question as above) | public void readPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("AQAAAJ0fgTc=");
createdUser.createPermission(permissionSettings, null).block();
Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("AQAAAJ0fgTc=")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
} | .withPermissionContainerName("AQAAAJ0fgTc=") | public void readPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
createdUser.createPermission(permissionSettings, null).block();
Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
} | class PermissionCrudTest extends TestSuiteBase {
// Database created once per class run; all test permissions live under it.
private CosmosAsyncDatabase createdDatabase;
// User that owns the permissions created by each test.
private CosmosAsyncUser createdUser;
// Unique database id so concurrent test runs do not collide.
private final String databaseId = CosmosDatabaseForTest.generateId();
// Async client built from the injected builder in the @BeforeClass setup.
private CosmosAsyncClient client;
/** Runs the suite once per client configuration supplied by the "clientBuilders" data provider. */
@Factory(dataProvider = "clientBuilders")
public PermissionCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
/** Creates a READ permission scoped to a single item and verifies the response fields. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createPermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    // "myContainer" replaces the old resource-id fragment ("AQAAAJ0fgTc=");
    // ContainerChildResourceType is the public enum (CosmosContainerChildResourceKind was renamed).
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(UUID.randomUUID().toString())
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer")
        .setResourcePath(ContainerChildResourceType.ITEM, "doc1");
    Mono<CosmosPermissionResponse> createObservable = createdUser.createPermission(permissionSettings, null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(permissionSettings.getId())
        .withPermissionMode(PermissionMode.READ)
        .withPermissionContainerName("myContainer")
        .withPermissionResourceKind(ContainerChildResourceType.ITEM)
        .withPermissionResourceName("doc1")
        .notNullEtag()
        .build();
    validateSuccess(createObservable, validator);
}
/** Creates a permission, deletes it, and verifies a subsequent read fails with resource-not-found. */
// Fix: the duplicated @Test annotation (a compile error — @Test is not repeatable) is removed.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deletePermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    // "myContainer" replaces the old resource-id fragment ("AQAAAJ0fgTc=").
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(UUID.randomUUID().toString())
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    createdUser.createPermission(permissionSettings, null).block();
    CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
    Mono<CosmosPermissionResponse> deleteObservable = readBackPermission.delete(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .nullResource()
        .build();
    validateSuccess(deleteObservable, validator);
    // Let replicas observe the delete before asserting not-found.
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
    FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build();
    validateFailure(readObservable, notFoundValidator);
}
/** Verifies create-then-upsert: the upsert updates the permission mode from READ to ALL. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void upsertPermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    // "myContainer" replaces the old resource-id fragment ("AQAAAJ0fgTc=").
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(UUID.randomUUID().toString())
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null)
        .block();
    CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
    Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(readBackPermissionProperties.getId())
        .withPermissionMode(PermissionMode.READ)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(readObservable, validator);
    // Flip the mode and upsert; the response must reflect the new mode.
    readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
    Mono<CosmosPermissionResponse> updateObservable = createdUser.upsertPermission(readBackPermissionProperties, null);
    CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(readBackPermissionProperties.getId())
        .withPermissionMode(PermissionMode.ALL)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(updateObservable, validatorForUpdate);
}
/** Verifies create-then-replace: the replace updates the permission mode from READ to ALL. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replacePermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    String id = UUID.randomUUID().toString();
    // "myContainer" replaces the old resource-id fragment ("AQAAAJ0fgTc=").
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(id)
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null).block();
    CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
    Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(readBackPermissionResponse.getProperties().getId())
        .withPermissionMode(PermissionMode.READ)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(readObservable, validator);
    // Replace with an updated mode; the response must reflect the change.
    CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
    readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
    Mono<CosmosPermissionResponse> updateObservable = readBackPermission.replace(readBackPermissionProperties, null);
    CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .withId(readBackPermissionProperties.getId())
        .withPermissionMode(PermissionMode.ALL)
        .withPermissionContainerName("myContainer")
        .notNullEtag()
        .build();
    validateSuccess(updateObservable, validatorForUpdate);
}
/** Builds the async client and creates the test database before any test runs. */
@BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
public void before_PermissionCrudTest() {
client = getClientBuilder().buildAsyncClient();
createdDatabase = createDatabase(client, databaseId);
}
/** Releases the client after all tests; alwaysRun ensures cleanup even when tests fail. */
@AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
/** @return a user definition with a fresh random unique id. */
private static CosmosUserProperties getUserDefinition() {
    String userId = UUID.randomUUID().toString();
    return new CosmosUserProperties().setId(userId);
}
} | class PermissionCrudTest extends TestSuiteBase {
// Database created once per class run; all test permissions live under it.
private CosmosAsyncDatabase createdDatabase;
// User that owns the permissions created by each test.
private CosmosAsyncUser createdUser;
// Unique database id so concurrent test runs do not collide.
private final String databaseId = CosmosDatabaseForTest.generateId();
// Async client built from the injected builder in the @BeforeClass setup.
private CosmosAsyncClient client;
/** Runs the suite once per client configuration supplied by the "clientBuilders" data provider. */
@Factory(dataProvider = "clientBuilders")
public PermissionCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
/** Creates a READ permission scoped to a single item and verifies the response fields. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void createPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer")
.setResourcePath(ContainerChildResourceType.ITEM, "doc1");
Mono<CosmosPermissionResponse> createObservable = createdUser.createPermission(permissionSettings, null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.withPermissionResourceKind(ContainerChildResourceType.ITEM)
.withPermissionResourceName("doc1")
.notNullEtag()
.build();
validateSuccess(createObservable, validator);
}
/** Creates a permission, deletes it, and verifies a subsequent read fails with resource-not-found. */
// Fix: the duplicated @Test annotation (a compile error — @Test is not repeatable) is removed.
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deletePermission() throws Exception {
    createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
    CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
        .setId(UUID.randomUUID().toString())
        .setPermissionMode(PermissionMode.READ)
        .setContainerName("myContainer");
    createdUser.createPermission(permissionSettings, null).block();
    CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
    Mono<CosmosPermissionResponse> deleteObservable = readBackPermission.delete(null);
    CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
        .nullResource()
        .build();
    validateSuccess(deleteObservable, validator);
    // Let replicas observe the delete before asserting not-found.
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
    FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build();
    validateFailure(readObservable, notFoundValidator);
}
/** Verifies create-then-upsert: the upsert updates the permission mode from READ to ALL. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void upsertPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null)
.block();
CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read( null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
// Flip the mode and upsert; the response must reflect the new mode.
readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
Mono<CosmosPermissionResponse> updateObservable = createdUser.upsertPermission(readBackPermissionProperties, null);
CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.ALL)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(updateObservable, validatorForUpdate);
}
/** Verifies create-then-replace: the replace updates the permission mode from READ to ALL. */
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replacePermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
String id = UUID.randomUUID().toString();
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(id)
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null).block();
CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionResponse.getProperties().getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
// Replace with an updated mode; the response must reflect the change.
CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
Mono<CosmosPermissionResponse> updateObservable = readBackPermission.replace(readBackPermissionProperties, null);
CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.ALL)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(updateObservable, validatorForUpdate);
}
/** Builds the async client and creates the test database before any test runs. */
@BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
public void before_PermissionCrudTest() {
client = getClientBuilder().buildAsyncClient();
createdDatabase = createDatabase(client, databaseId);
}
@AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
private static CosmosUserProperties getUserDefinition() {
return new CosmosUserProperties()
.setId(UUID.randomUUID().toString());
}
} |
it's the container name... I will make a change to make it more obvious; initially I did not want to mess with the test used/expected values. | public void createPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("AQAAAJ0fgTc=")
.setResourcePath(CosmosContainerChildResourceKind.ITEM, "doc1");
Mono<CosmosPermissionResponse> createObservable = createdUser.createPermission(permissionSettings, null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("AQAAAJ0fgTc=")
.withPermissionResourceKind(CosmosContainerChildResourceKind.ITEM)
.withPermissionResourceName("doc1")
.notNullEtag()
.build();
validateSuccess(createObservable, validator);
} | .setContainerName("AQAAAJ0fgTc=") | public void createPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer")
.setResourcePath(ContainerChildResourceType.ITEM, "doc1");
Mono<CosmosPermissionResponse> createObservable = createdUser.createPermission(permissionSettings, null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.withPermissionResourceKind(ContainerChildResourceType.ITEM)
.withPermissionResourceName("doc1")
.notNullEtag()
.build();
validateSuccess(createObservable, validator);
} | class PermissionCrudTest extends TestSuiteBase {
private CosmosAsyncDatabase createdDatabase;
private CosmosAsyncUser createdUser;
private final String databaseId = CosmosDatabaseForTest.generateId();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuilders")
public PermissionCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("AQAAAJ0fgTc=");
createdUser.createPermission(permissionSettings, null).block();
Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("AQAAAJ0fgTc=")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deletePermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("AQAAAJ0fgTc=");
createdUser.createPermission(permissionSettings, null).block();
CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
Mono<CosmosPermissionResponse> deleteObservable = readBackPermission.delete(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.nullResource()
.build();
validateSuccess(deleteObservable, validator);
waitIfNeededForReplicasToCatchUp(getClientBuilder());
Mono<CosmosPermissionResponse> readObservable = readBackPermission.read( null);
FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build();
validateFailure(readObservable, notFoundValidator);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void upsertPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("AQAAAJ0fgTc=");
CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null)
.block();
CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read( null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("AQAAAJ0fgTc=")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
Mono<CosmosPermissionResponse> updateObservable = createdUser.upsertPermission(readBackPermissionProperties, null);
CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.ALL)
.withPermissionContainerName("AQAAAJ0fgTc=")
.notNullEtag()
.build();
validateSuccess(updateObservable, validatorForUpdate);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replacePermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
String id = UUID.randomUUID().toString();
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(id)
.setPermissionMode(PermissionMode.READ)
.setContainerName("AQAAAJ0fgTc=");
CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null).block();
CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionResponse.getProperties().getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("AQAAAJ0fgTc=")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
Mono<CosmosPermissionResponse> updateObservable = readBackPermission.replace(readBackPermissionProperties, null);
CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.ALL)
.withPermissionContainerName("AQAAAJ0fgTc=")
.notNullEtag()
.build();
validateSuccess(updateObservable, validatorForUpdate);
}
@BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
public void before_PermissionCrudTest() {
client = getClientBuilder().buildAsyncClient();
createdDatabase = createDatabase(client, databaseId);
}
@AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
private static CosmosUserProperties getUserDefinition() {
return new CosmosUserProperties()
.setId(UUID.randomUUID().toString());
}
} | class PermissionCrudTest extends TestSuiteBase {
private CosmosAsyncDatabase createdDatabase;
private CosmosAsyncUser createdUser;
private final String databaseId = CosmosDatabaseForTest.generateId();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuilders")
public PermissionCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void readPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
createdUser.createPermission(permissionSettings, null).block();
Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(permissionSettings.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void deletePermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
createdUser.createPermission(permissionSettings, null).block();
CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
Mono<CosmosPermissionResponse> deleteObservable = readBackPermission.delete(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.nullResource()
.build();
validateSuccess(deleteObservable, validator);
waitIfNeededForReplicasToCatchUp(getClientBuilder());
Mono<CosmosPermissionResponse> readObservable = readBackPermission.read( null);
FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build();
validateFailure(readObservable, notFoundValidator);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void upsertPermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(UUID.randomUUID().toString())
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null)
.block();
CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
Mono<CosmosPermissionResponse> readObservable = createdUser.getPermission(permissionSettings.getId()).read( null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
Mono<CosmosPermissionResponse> updateObservable = createdUser.upsertPermission(readBackPermissionProperties, null);
CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.ALL)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(updateObservable, validatorForUpdate);
}
@Test(groups = { "simple" }, timeOut = TIMEOUT)
public void replacePermission() throws Exception {
createdUser = safeCreateUser(client, createdDatabase.getId(), getUserDefinition());
String id = UUID.randomUUID().toString();
CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties()
.setId(id)
.setPermissionMode(PermissionMode.READ)
.setContainerName("myContainer");
CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null).block();
CosmosAsyncPermission readBackPermission = createdUser.getPermission(permissionSettings.getId());
Mono<CosmosPermissionResponse> readObservable = readBackPermission.read(null);
CosmosResponseValidator<CosmosPermissionResponse> validator = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionResponse.getProperties().getId())
.withPermissionMode(PermissionMode.READ)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(readObservable, validator);
CosmosPermissionProperties readBackPermissionProperties = readBackPermissionResponse.getProperties();
readBackPermissionProperties = readBackPermissionProperties.setPermissionMode(PermissionMode.ALL);
Mono<CosmosPermissionResponse> updateObservable = readBackPermission.replace(readBackPermissionProperties, null);
CosmosResponseValidator<CosmosPermissionResponse> validatorForUpdate = new CosmosResponseValidator.Builder<CosmosPermissionResponse>()
.withId(readBackPermissionProperties.getId())
.withPermissionMode(PermissionMode.ALL)
.withPermissionContainerName("myContainer")
.notNullEtag()
.build();
validateSuccess(updateObservable, validatorForUpdate);
}
@BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
public void before_PermissionCrudTest() {
client = getClientBuilder().buildAsyncClient();
createdDatabase = createDatabase(client, databaseId);
}
@AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
private static CosmosUserProperties getUserDefinition() {
return new CosmosUserProperties()
.setId(UUID.randomUUID().toString());
}
} |
The CFP should continue to work with lesser than SESSION consistency level; the downside of it is that lease documents might not be updated in a timely fashion by the current CFP instance in certain conditions which at worst can lead to documents being seen more than once. SESSION or better will help avoid that because once the lease document is written, the CFP instance will have it right away in most scenarios across all its workers. | public ChangeFeedProcessorBuilderImpl leaseContainer(CosmosAsyncContainer leaseClient) {
if (leaseClient == null) {
throw new IllegalArgumentException("leaseClient");
}
if (!getContextClient(leaseClient).isContentResponseOnWriteEnabled()) {
throw new IllegalArgumentException("leaseClient: content response on write setting must be enabled");
}
ConsistencyLevel consistencyLevel = getContextClient(leaseClient).getConsistencyLevel();
if (consistencyLevel == ConsistencyLevel.CONSISTENT_PREFIX || consistencyLevel == ConsistencyLevel.EVENTUAL) {
logger.warn("leaseClient consistency level setting are less then expected which is SESSION");
}
this.leaseContextClient = new ChangeFeedContextClientImpl(leaseClient);
return this;
} | logger.warn("leaseClient consistency level setting are less then expected which is SESSION"); | public ChangeFeedProcessorBuilderImpl leaseContainer(CosmosAsyncContainer leaseClient) {
if (leaseClient == null) {
throw new IllegalArgumentException("leaseClient");
}
if (!getContextClient(leaseClient).isContentResponseOnWriteEnabled()) {
throw new IllegalArgumentException("leaseClient: content response on write setting must be enabled");
}
ConsistencyLevel consistencyLevel = getContextClient(leaseClient).getConsistencyLevel();
if (consistencyLevel == ConsistencyLevel.CONSISTENT_PREFIX || consistencyLevel == ConsistencyLevel.EVENTUAL) {
logger.warn("leaseClient consistency level setting are less then expected which is SESSION");
}
this.leaseContextClient = new ChangeFeedContextClientImpl(leaseClient);
return this;
} | class ChangeFeedProcessorBuilderImpl implements ChangeFeedProcessor.BuilderDefinition, ChangeFeedProcessor, AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(ChangeFeedProcessorBuilderImpl.class);
private static final long DefaultUnhealthinessDuration = Duration.ofMinutes(15).toMillis();
private final Duration sleepTime = Duration.ofSeconds(15);
private final Duration lockTime = Duration.ofSeconds(30);
private static final int DefaultQueryPartitionsMaxBatchSize = 100;
private int queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
private int degreeOfParallelism = 25;
private String hostName;
private ChangeFeedContextClient feedContextClient;
private ChangeFeedProcessorOptions changeFeedProcessorOptions;
private ChangeFeedObserverFactory observerFactory;
private volatile String databaseResourceId;
private volatile String collectionResourceId;
private ChangeFeedContextClient leaseContextClient;
private PartitionLoadBalancingStrategy loadBalancingStrategy;
private PartitionProcessorFactory partitionProcessorFactory;
private LeaseStoreManager leaseStoreManager;
private HealthMonitor healthMonitor;
private volatile PartitionManager partitionManager;
private Scheduler scheduler;
/**
* Start listening for changes asynchronously.
*
* @return a representation of the deferred computation of this call.
*/
@Override
public Mono<Void> start() {
if (this.partitionManager == null) {
return this.initializeCollectionPropertiesForBuild()
.flatMap( value -> this.getLeaseStoreManager()
.flatMap(leaseStoreManager -> this.buildPartitionManager(leaseStoreManager)))
.flatMap(partitionManager1 -> {
this.partitionManager = partitionManager1;
return this.partitionManager.start();
});
} else {
return partitionManager.start();
}
}
/**
* Stops listening for changes asynchronously.
*
* @return a representation of the deferred computation of this call.
*/
@Override
public Mono<Void> stop() {
if (this.partitionManager == null || !this.partitionManager.isRunning()) {
throw new IllegalStateException("The ChangeFeedProcessor instance has not fully started");
}
return this.partitionManager.stop();
}
/**
* Returns the state of the change feed processor.
*
* @return true if the change feed processor is currently active and running.
*/
@Override
public boolean isStarted() {
return this.partitionManager != null && this.partitionManager.isRunning();
}
/**
* Returns the current owner (host) and an approximation of the difference between the last processed item (defined
* by the state of the feed container) and the latest change in the container for each partition (lease
* document).
* <p>
* An empty map will be returned if the processor was not started or no lease documents matching the current
* {@link ChangeFeedProcessor} instance's lease prefix could be found.
*
* @return a map representing the current owner and lease token, the current LSN and latest LSN, and the estimated
* lag, asynchronously.
*/
@Override
public Mono<Map<String, Integer>> getEstimatedLag() {
Map<String, Integer> earlyResult = new ConcurrentHashMap<>();
if (this.leaseStoreManager == null || this.feedContextClient == null) {
return Mono.just(earlyResult);
}
return this.leaseStoreManager.getAllLeases()
.flatMap(lease -> {
ChangeFeedOptions options = new ChangeFeedOptions()
.setMaxItemCount(1)
.setPartitionKeyRangeId(lease.getLeaseToken())
.setStartFromBeginning(true)
.setRequestContinuation(lease.getContinuationToken());
return this.feedContextClient.createDocumentChangeFeedQuery(this.feedContextClient.getContainerClient(), options)
.take(1)
.map(feedResponse -> {
final String pkRangeIdSeparator = ":";
final String segmentSeparator = "
final String lsnPropertyName = "_lsn";
String ownerValue = lease.getOwner();
String sessionTokenLsn = feedResponse.getSessionToken();
String parsedSessionToken = sessionTokenLsn.substring(sessionTokenLsn.indexOf(pkRangeIdSeparator));
String[] segments = parsedSessionToken.split(segmentSeparator);
String latestLsn = segments[0];
if (segments.length >= 2) {
latestLsn = segments[1];
}
if (ownerValue == null) {
ownerValue = "";
}
if (feedResponse.getResults() == null || feedResponse.getResults().size() == 0) {
return Pair.of(ownerValue + "_" + lease.getLeaseToken(), 0);
}
Integer currentLsn = 0;
Integer estimatedLag = 0;
try {
currentLsn = Integer.valueOf(feedResponse.getResults().get(0).get(lsnPropertyName).asText("0"));
estimatedLag = Integer.valueOf(latestLsn);
estimatedLag = estimatedLag - currentLsn + 1;
} catch (NumberFormatException ex) {
logger.warn("Unexpected Cosmos LSN found", ex);
estimatedLag = -1;
}
return Pair.of(ownerValue + "_" + lease.getLeaseToken() + "_" + currentLsn + "_" + latestLsn, estimatedLag);
});
})
.collectList()
.map(valueList -> {
Map<String, Integer> result = new ConcurrentHashMap<>();
for (Pair<String, Integer> pair : valueList) {
result.put(pair.getKey(), pair.getValue());
}
return result;
});
}
/**
* Sets the host name.
*
* @param hostName the name to be used for the host. When using multiple hosts, each host must have a unique name.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl hostName(String hostName) {
this.hostName = hostName;
return this;
}
/**
* Sets and existing {@link CosmosAsyncContainer} to be used to read from the monitored collection.
*
* @param feedDocumentClient the instance of {@link CosmosAsyncContainer} to be used.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl feedContainer(CosmosAsyncContainer feedDocumentClient) {
if (feedDocumentClient == null) {
throw new IllegalArgumentException("feedContextClient");
}
this.feedContextClient = new ChangeFeedContextClientImpl(feedDocumentClient);
return this;
}
/**
* Sets the {@link ChangeFeedProcessorOptions} to be used.
*
* @param changeFeedProcessorOptions the change feed processor options to use.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl options(ChangeFeedProcessorOptions changeFeedProcessorOptions) {
if (changeFeedProcessorOptions == null) {
throw new IllegalArgumentException("changeFeedProcessorOptions");
}
this.changeFeedProcessorOptions = changeFeedProcessorOptions;
return this;
}
/**
* Sets the {@link ChangeFeedObserverFactory} to be used to generate {@link ChangeFeedObserver}
*
* @param observerFactory The instance of {@link ChangeFeedObserverFactory} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl observerFactory(ChangeFeedObserverFactory observerFactory) {
if (observerFactory == null) {
throw new IllegalArgumentException("observerFactory");
}
this.observerFactory = observerFactory;
return this;
}
/**
* Sets an existing {@link ChangeFeedObserver} type to be used by a {@link ChangeFeedObserverFactory} to process changes.
* @param type the type of {@link ChangeFeedObserver} to be used.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl observer(Class<? extends ChangeFeedObserver> type) {
if (type == null) {
throw new IllegalArgumentException("type");
}
this.observerFactory = new ChangeFeedObserverFactoryImpl(type);
return this;
}
@Override
public ChangeFeedProcessorBuilderImpl handleChanges(Consumer<List<JsonNode>> consumer) {
return this.observerFactory(new DefaultObserverFactory(consumer));
}
/**
* Sets the database resource ID of the monitored collection.
*
* @param databaseResourceId the database resource ID of the monitored collection.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withDatabaseResourceId(String databaseResourceId) {
this.databaseResourceId = databaseResourceId;
return this;
}
/**
* Sets the collection resource ID of the monitored collection.
* @param collectionResourceId the collection resource ID of the monitored collection.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withCollectionResourceId(String collectionResourceId) {
this.collectionResourceId = collectionResourceId;
return this;
}
/**
* Sets an existing {@link CosmosAsyncContainer} to be used to read from the leases collection.
*
* @param leaseClient the instance of {@link CosmosAsyncContainer} to use.
* @return current Builder.
*/
@Override
/**
* Sets the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
*
* @param loadBalancingStrategy the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionLoadBalancingStrategy(PartitionLoadBalancingStrategy loadBalancingStrategy) {
if (loadBalancingStrategy == null) {
throw new IllegalArgumentException("loadBalancingStrategy");
}
this.loadBalancingStrategy = loadBalancingStrategy;
return this;
}
/**
* Sets the {@link PartitionProcessorFactory} to be used to create {@link PartitionProcessor} for partition processing.
*
* @param partitionProcessorFactory the instance of {@link PartitionProcessorFactory} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) {
if (partitionProcessorFactory == null) {
throw new IllegalArgumentException("partitionProcessorFactory");
}
this.partitionProcessorFactory = partitionProcessorFactory;
return this;
}
/**
* Sets the {@link LeaseStoreManager} to be used to manage leases.
*
* @param leaseStoreManager the instance of {@link LeaseStoreManager} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withLeaseStoreManager(LeaseStoreManager leaseStoreManager) {
if (leaseStoreManager == null) {
throw new IllegalArgumentException("leaseStoreManager");
}
this.leaseStoreManager = leaseStoreManager;
return this;
}
/**
* Sets the {@link HealthMonitor} to be used to monitor unhealthiness situation.
*
* @param healthMonitor The instance of {@link HealthMonitor} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withHealthMonitor(HealthMonitor healthMonitor) {
if (healthMonitor == null) {
throw new IllegalArgumentException("healthMonitor");
}
this.healthMonitor = healthMonitor;
return this;
}
/**
* Builds a new instance of the {@link ChangeFeedProcessor} with the specified configuration asynchronously.
*
* @return an instance of {@link ChangeFeedProcessor}.
*/
@Override
public ChangeFeedProcessor build() {
if (this.hostName == null) {
throw new IllegalArgumentException("Host name was not specified");
}
if (this.observerFactory == null) {
throw new IllegalArgumentException("Observer was not specified");
}
if (this.scheduler == null) {
this.scheduler = Schedulers.elastic();
}
return this;
}
public ChangeFeedProcessorBuilderImpl() {
this.queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
this.degreeOfParallelism = 25;
}
public ChangeFeedProcessorBuilderImpl(PartitionManager partitionManager) {
this.partitionManager = partitionManager;
}
private Mono<ChangeFeedProcessor> initializeCollectionPropertiesForBuild() {
if (this.changeFeedProcessorOptions == null) {
this.changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
}
return this.feedContextClient
.readDatabase(this.feedContextClient.getDatabaseClient(), null)
.map( databaseResourceResponse -> {
this.databaseResourceId = databaseResourceResponse.getProperties().getId();
return this.databaseResourceId;
})
.flatMap( id -> this.feedContextClient
.readContainer(this.feedContextClient.getContainerClient(), null)
.map(documentCollectionResourceResponse -> {
this.collectionResourceId = documentCollectionResourceResponse.getProperties().getId();
return this;
}));
}
private Mono<LeaseStoreManager> getLeaseStoreManager() {
if (this.leaseStoreManager == null) {
return this.leaseContextClient.readContainerSettings(this.leaseContextClient.getContainerClient(), null)
.flatMap( collectionSettings -> {
boolean isPartitioned =
collectionSettings.getPartitionKeyDefinition() != null &&
collectionSettings.getPartitionKeyDefinition().getPaths() != null &&
collectionSettings.getPartitionKeyDefinition().getPaths().size() > 0;
if (!isPartitioned || (collectionSettings.getPartitionKeyDefinition().getPaths().size() != 1 || !collectionSettings.getPartitionKeyDefinition().getPaths().get(0).equals("/id"))) {
return Mono.error(new IllegalArgumentException("The lease collection must have partition key equal to id."));
}
RequestOptionsFactory requestOptionsFactory = new PartitionedByIdCollectionRequestOptionsFactory();
String leasePrefix = this.getLeasePrefix();
return LeaseStoreManager.builder()
.leasePrefix(leasePrefix)
.leaseCollectionLink(this.leaseContextClient.getContainerClient())
.leaseContextClient(this.leaseContextClient)
.requestOptionsFactory(requestOptionsFactory)
.hostName(this.hostName)
.build()
.map(manager -> {
this.leaseStoreManager = manager;
return this.leaseStoreManager;
});
});
}
return Mono.just(this.leaseStoreManager);
}
private String getLeasePrefix() {
String optionsPrefix = this.changeFeedProcessorOptions.getLeasePrefix();
if (optionsPrefix == null) {
optionsPrefix = "";
}
URI uri = this.feedContextClient.getServiceEndpoint();
return String.format(
"%s%s_%s_%s",
optionsPrefix,
uri.getHost(),
this.databaseResourceId,
this.collectionResourceId);
}
private Mono<PartitionManager> buildPartitionManager(LeaseStoreManager leaseStoreManager) {
CheckpointerObserverFactory factory = new CheckpointerObserverFactory(this.observerFactory, new CheckpointFrequency());
PartitionSynchronizerImpl synchronizer = new PartitionSynchronizerImpl(
this.feedContextClient,
this.feedContextClient.getContainerClient(),
leaseStoreManager,
leaseStoreManager,
this.degreeOfParallelism,
this.queryPartitionsMaxBatchSize
);
Bootstrapper bootstrapper = new BootstrapperImpl(synchronizer, leaseStoreManager, this.lockTime, this.sleepTime);
PartitionSupervisorFactory partitionSupervisorFactory = new PartitionSupervisorFactoryImpl(
factory,
leaseStoreManager,
this.partitionProcessorFactory != null ? this.partitionProcessorFactory : new PartitionProcessorFactoryImpl(
this.feedContextClient,
this.changeFeedProcessorOptions,
leaseStoreManager,
this.feedContextClient.getContainerClient()),
this.changeFeedProcessorOptions,
this.scheduler
);
if (this.loadBalancingStrategy == null) {
this.loadBalancingStrategy = new EqualPartitionsBalancingStrategy(
this.hostName,
this.changeFeedProcessorOptions.getMinScaleCount(),
this.changeFeedProcessorOptions.getMaxScaleCount(),
this.changeFeedProcessorOptions.getLeaseExpirationInterval());
}
PartitionController partitionController = new PartitionControllerImpl(leaseStoreManager, leaseStoreManager, partitionSupervisorFactory, synchronizer, scheduler);
if (this.healthMonitor == null) {
this.healthMonitor = new TraceHealthMonitor();
}
PartitionController partitionController2 = new HealthMonitoringPartitionControllerDecorator(partitionController, this.healthMonitor);
PartitionLoadBalancer partitionLoadBalancer = new PartitionLoadBalancerImpl(
partitionController2,
leaseStoreManager,
this.loadBalancingStrategy,
this.changeFeedProcessorOptions.getLeaseAcquireInterval(),
this.scheduler
);
PartitionManager partitionManager = new PartitionManagerImpl(bootstrapper, partitionController, partitionLoadBalancer);
return Mono.just(partitionManager);
}
@Override
public void close() {
this.stop().subscribeOn(Schedulers.elastic()).subscribe();
}
} | class ChangeFeedProcessorBuilderImpl implements ChangeFeedProcessor.BuilderDefinition, ChangeFeedProcessor, AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(ChangeFeedProcessorBuilderImpl.class);
private static final long DefaultUnhealthinessDuration = Duration.ofMinutes(15).toMillis();
private final Duration sleepTime = Duration.ofSeconds(15);
private final Duration lockTime = Duration.ofSeconds(30);
private static final int DefaultQueryPartitionsMaxBatchSize = 100;
private int queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
private int degreeOfParallelism = 25;
private String hostName;
private ChangeFeedContextClient feedContextClient;
private ChangeFeedProcessorOptions changeFeedProcessorOptions;
private ChangeFeedObserverFactory observerFactory;
private volatile String databaseResourceId;
private volatile String collectionResourceId;
private ChangeFeedContextClient leaseContextClient;
private PartitionLoadBalancingStrategy loadBalancingStrategy;
private PartitionProcessorFactory partitionProcessorFactory;
private LeaseStoreManager leaseStoreManager;
private HealthMonitor healthMonitor;
private volatile PartitionManager partitionManager;
private Scheduler scheduler;
/**
* Start listening for changes asynchronously.
*
* @return a representation of the deferred computation of this call.
*/
@Override
public Mono<Void> start() {
if (this.partitionManager == null) {
return this.initializeCollectionPropertiesForBuild()
.flatMap( value -> this.getLeaseStoreManager()
.flatMap(leaseStoreManager -> this.buildPartitionManager(leaseStoreManager)))
.flatMap(partitionManager1 -> {
this.partitionManager = partitionManager1;
return this.partitionManager.start();
});
} else {
return partitionManager.start();
}
}
/**
* Stops listening for changes asynchronously.
*
* @return a representation of the deferred computation of this call.
*/
@Override
public Mono<Void> stop() {
if (this.partitionManager == null || !this.partitionManager.isRunning()) {
throw new IllegalStateException("The ChangeFeedProcessor instance has not fully started");
}
return this.partitionManager.stop();
}
/**
* Returns the state of the change feed processor.
*
* @return true if the change feed processor is currently active and running.
*/
@Override
public boolean isStarted() {
return this.partitionManager != null && this.partitionManager.isRunning();
}
/**
* Returns the current owner (host) and an approximation of the difference between the last processed item (defined
* by the state of the feed container) and the latest change in the container for each partition (lease
* document).
* <p>
* An empty map will be returned if the processor was not started or no lease documents matching the current
* {@link ChangeFeedProcessor} instance's lease prefix could be found.
*
* @return a map representing the current owner and lease token, the current LSN and latest LSN, and the estimated
* lag, asynchronously.
*/
@Override
public Mono<Map<String, Integer>> getEstimatedLag() {
Map<String, Integer> earlyResult = new ConcurrentHashMap<>();
if (this.leaseStoreManager == null || this.feedContextClient == null) {
return Mono.just(earlyResult);
}
return this.leaseStoreManager.getAllLeases()
.flatMap(lease -> {
ChangeFeedOptions options = new ChangeFeedOptions()
.setMaxItemCount(1)
.setPartitionKeyRangeId(lease.getLeaseToken())
.setStartFromBeginning(true)
.setRequestContinuation(lease.getContinuationToken());
return this.feedContextClient.createDocumentChangeFeedQuery(this.feedContextClient.getContainerClient(), options)
.take(1)
.map(feedResponse -> {
final String pkRangeIdSeparator = ":";
final String segmentSeparator = "
final String lsnPropertyName = "_lsn";
String ownerValue = lease.getOwner();
String sessionTokenLsn = feedResponse.getSessionToken();
String parsedSessionToken = sessionTokenLsn.substring(sessionTokenLsn.indexOf(pkRangeIdSeparator));
String[] segments = parsedSessionToken.split(segmentSeparator);
String latestLsn = segments[0];
if (segments.length >= 2) {
latestLsn = segments[1];
}
if (ownerValue == null) {
ownerValue = "";
}
if (feedResponse.getResults() == null || feedResponse.getResults().size() == 0) {
return Pair.of(ownerValue + "_" + lease.getLeaseToken(), 0);
}
Integer currentLsn = 0;
Integer estimatedLag = 0;
try {
currentLsn = Integer.valueOf(feedResponse.getResults().get(0).get(lsnPropertyName).asText("0"));
estimatedLag = Integer.valueOf(latestLsn);
estimatedLag = estimatedLag - currentLsn + 1;
} catch (NumberFormatException ex) {
logger.warn("Unexpected Cosmos LSN found", ex);
estimatedLag = -1;
}
return Pair.of(ownerValue + "_" + lease.getLeaseToken() + "_" + currentLsn + "_" + latestLsn, estimatedLag);
});
})
.collectList()
.map(valueList -> {
Map<String, Integer> result = new ConcurrentHashMap<>();
for (Pair<String, Integer> pair : valueList) {
result.put(pair.getKey(), pair.getValue());
}
return result;
});
}
/**
* Sets the host name.
*
* @param hostName the name to be used for the host. When using multiple hosts, each host must have a unique name.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl hostName(String hostName) {
this.hostName = hostName;
return this;
}
/**
* Sets and existing {@link CosmosAsyncContainer} to be used to read from the monitored collection.
*
* @param feedDocumentClient the instance of {@link CosmosAsyncContainer} to be used.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl feedContainer(CosmosAsyncContainer feedDocumentClient) {
if (feedDocumentClient == null) {
throw new IllegalArgumentException("feedContextClient");
}
this.feedContextClient = new ChangeFeedContextClientImpl(feedDocumentClient);
return this;
}
/**
* Sets the {@link ChangeFeedProcessorOptions} to be used.
*
* @param changeFeedProcessorOptions the change feed processor options to use.
* @return current Builder.
*/
@Override
public ChangeFeedProcessorBuilderImpl options(ChangeFeedProcessorOptions changeFeedProcessorOptions) {
if (changeFeedProcessorOptions == null) {
throw new IllegalArgumentException("changeFeedProcessorOptions");
}
this.changeFeedProcessorOptions = changeFeedProcessorOptions;
return this;
}
/**
* Sets the {@link ChangeFeedObserverFactory} to be used to generate {@link ChangeFeedObserver}
*
* @param observerFactory The instance of {@link ChangeFeedObserverFactory} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl observerFactory(ChangeFeedObserverFactory observerFactory) {
if (observerFactory == null) {
throw new IllegalArgumentException("observerFactory");
}
this.observerFactory = observerFactory;
return this;
}
/**
* Sets an existing {@link ChangeFeedObserver} type to be used by a {@link ChangeFeedObserverFactory} to process changes.
* @param type the type of {@link ChangeFeedObserver} to be used.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl observer(Class<? extends ChangeFeedObserver> type) {
if (type == null) {
throw new IllegalArgumentException("type");
}
this.observerFactory = new ChangeFeedObserverFactoryImpl(type);
return this;
}
@Override
public ChangeFeedProcessorBuilderImpl handleChanges(Consumer<List<JsonNode>> consumer) {
return this.observerFactory(new DefaultObserverFactory(consumer));
}
/**
* Sets the database resource ID of the monitored collection.
*
* @param databaseResourceId the database resource ID of the monitored collection.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withDatabaseResourceId(String databaseResourceId) {
this.databaseResourceId = databaseResourceId;
return this;
}
/**
* Sets the collection resource ID of the monitored collection.
* @param collectionResourceId the collection resource ID of the monitored collection.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withCollectionResourceId(String collectionResourceId) {
this.collectionResourceId = collectionResourceId;
return this;
}
/**
* Sets an existing {@link CosmosAsyncContainer} to be used to read from the leases collection.
*
* @param leaseClient the instance of {@link CosmosAsyncContainer} to use.
* @return current Builder.
*/
@Override
/**
* Sets the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
*
* @param loadBalancingStrategy the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionLoadBalancingStrategy(PartitionLoadBalancingStrategy loadBalancingStrategy) {
if (loadBalancingStrategy == null) {
throw new IllegalArgumentException("loadBalancingStrategy");
}
this.loadBalancingStrategy = loadBalancingStrategy;
return this;
}
/**
* Sets the {@link PartitionProcessorFactory} to be used to create {@link PartitionProcessor} for partition processing.
*
* @param partitionProcessorFactory the instance of {@link PartitionProcessorFactory} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withPartitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) {
if (partitionProcessorFactory == null) {
throw new IllegalArgumentException("partitionProcessorFactory");
}
this.partitionProcessorFactory = partitionProcessorFactory;
return this;
}
/**
* Sets the {@link LeaseStoreManager} to be used to manage leases.
*
* @param leaseStoreManager the instance of {@link LeaseStoreManager} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withLeaseStoreManager(LeaseStoreManager leaseStoreManager) {
if (leaseStoreManager == null) {
throw new IllegalArgumentException("leaseStoreManager");
}
this.leaseStoreManager = leaseStoreManager;
return this;
}
/**
* Sets the {@link HealthMonitor} to be used to monitor unhealthiness situation.
*
* @param healthMonitor The instance of {@link HealthMonitor} to use.
* @return current Builder.
*/
public ChangeFeedProcessorBuilderImpl withHealthMonitor(HealthMonitor healthMonitor) {
if (healthMonitor == null) {
throw new IllegalArgumentException("healthMonitor");
}
this.healthMonitor = healthMonitor;
return this;
}
/**
* Builds a new instance of the {@link ChangeFeedProcessor} with the specified configuration asynchronously.
*
* @return an instance of {@link ChangeFeedProcessor}.
*/
@Override
public ChangeFeedProcessor build() {
if (this.hostName == null) {
throw new IllegalArgumentException("Host name was not specified");
}
if (this.observerFactory == null) {
throw new IllegalArgumentException("Observer was not specified");
}
if (this.scheduler == null) {
this.scheduler = Schedulers.elastic();
}
return this;
}
public ChangeFeedProcessorBuilderImpl() {
this.queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize;
this.degreeOfParallelism = 25;
}
public ChangeFeedProcessorBuilderImpl(PartitionManager partitionManager) {
this.partitionManager = partitionManager;
}
private Mono<ChangeFeedProcessor> initializeCollectionPropertiesForBuild() {
if (this.changeFeedProcessorOptions == null) {
this.changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
}
return this.feedContextClient
.readDatabase(this.feedContextClient.getDatabaseClient(), null)
.map( databaseResourceResponse -> {
this.databaseResourceId = databaseResourceResponse.getProperties().getId();
return this.databaseResourceId;
})
.flatMap( id -> this.feedContextClient
.readContainer(this.feedContextClient.getContainerClient(), null)
.map(documentCollectionResourceResponse -> {
this.collectionResourceId = documentCollectionResourceResponse.getProperties().getId();
return this;
}));
}
private Mono<LeaseStoreManager> getLeaseStoreManager() {
if (this.leaseStoreManager == null) {
return this.leaseContextClient.readContainerSettings(this.leaseContextClient.getContainerClient(), null)
.flatMap( collectionSettings -> {
boolean isPartitioned =
collectionSettings.getPartitionKeyDefinition() != null &&
collectionSettings.getPartitionKeyDefinition().getPaths() != null &&
collectionSettings.getPartitionKeyDefinition().getPaths().size() > 0;
if (!isPartitioned || (collectionSettings.getPartitionKeyDefinition().getPaths().size() != 1 || !collectionSettings.getPartitionKeyDefinition().getPaths().get(0).equals("/id"))) {
return Mono.error(new IllegalArgumentException("The lease collection must have partition key equal to id."));
}
RequestOptionsFactory requestOptionsFactory = new PartitionedByIdCollectionRequestOptionsFactory();
String leasePrefix = this.getLeasePrefix();
return LeaseStoreManager.builder()
.leasePrefix(leasePrefix)
.leaseCollectionLink(this.leaseContextClient.getContainerClient())
.leaseContextClient(this.leaseContextClient)
.requestOptionsFactory(requestOptionsFactory)
.hostName(this.hostName)
.build()
.map(manager -> {
this.leaseStoreManager = manager;
return this.leaseStoreManager;
});
});
}
return Mono.just(this.leaseStoreManager);
}
private String getLeasePrefix() {
String optionsPrefix = this.changeFeedProcessorOptions.getLeasePrefix();
if (optionsPrefix == null) {
optionsPrefix = "";
}
URI uri = this.feedContextClient.getServiceEndpoint();
return String.format(
"%s%s_%s_%s",
optionsPrefix,
uri.getHost(),
this.databaseResourceId,
this.collectionResourceId);
}
private Mono<PartitionManager> buildPartitionManager(LeaseStoreManager leaseStoreManager) {
CheckpointerObserverFactory factory = new CheckpointerObserverFactory(this.observerFactory, new CheckpointFrequency());
PartitionSynchronizerImpl synchronizer = new PartitionSynchronizerImpl(
this.feedContextClient,
this.feedContextClient.getContainerClient(),
leaseStoreManager,
leaseStoreManager,
this.degreeOfParallelism,
this.queryPartitionsMaxBatchSize
);
Bootstrapper bootstrapper = new BootstrapperImpl(synchronizer, leaseStoreManager, this.lockTime, this.sleepTime);
PartitionSupervisorFactory partitionSupervisorFactory = new PartitionSupervisorFactoryImpl(
factory,
leaseStoreManager,
this.partitionProcessorFactory != null ? this.partitionProcessorFactory : new PartitionProcessorFactoryImpl(
this.feedContextClient,
this.changeFeedProcessorOptions,
leaseStoreManager,
this.feedContextClient.getContainerClient()),
this.changeFeedProcessorOptions,
this.scheduler
);
if (this.loadBalancingStrategy == null) {
this.loadBalancingStrategy = new EqualPartitionsBalancingStrategy(
this.hostName,
this.changeFeedProcessorOptions.getMinScaleCount(),
this.changeFeedProcessorOptions.getMaxScaleCount(),
this.changeFeedProcessorOptions.getLeaseExpirationInterval());
}
PartitionController partitionController = new PartitionControllerImpl(leaseStoreManager, leaseStoreManager, partitionSupervisorFactory, synchronizer, scheduler);
if (this.healthMonitor == null) {
this.healthMonitor = new TraceHealthMonitor();
}
PartitionController partitionController2 = new HealthMonitoringPartitionControllerDecorator(partitionController, this.healthMonitor);
PartitionLoadBalancer partitionLoadBalancer = new PartitionLoadBalancerImpl(
partitionController2,
leaseStoreManager,
this.loadBalancingStrategy,
this.changeFeedProcessorOptions.getLeaseAcquireInterval(),
this.scheduler
);
PartitionManager partitionManager = new PartitionManagerImpl(bootstrapper, partitionController, partitionLoadBalancer);
return Mono.just(partitionManager);
}
@Override
public void close() {
this.stop().subscribeOn(Schedulers.elastic()).subscribe();
}
} |
Could we make this String value a constant, I believe making the implementation constant public would work. Same comment for other places where we are doing this. | public EdgeNGramTokenFilter() {
odataType = "
} | odataType = " | public EdgeNGramTokenFilter() {
odataType = V2_ODATA_TYPE;
} | class EdgeNGramTokenFilter extends TokenFilter {
@JsonProperty(value = "@odata.type")
private String odataType;
/*
* The minimum n-gram length. Default is 1. Must be less than the value of
* maxGram.
*/
@JsonProperty(value = "minGram")
private Integer minGram;
/*
* The maximum n-gram length. Default is 2.
*/
@JsonProperty(value = "maxGram")
private Integer maxGram;
/*
* Specifies which side of the input the n-gram should be generated from.
* Default is "front". Possible values include: 'Front', 'Back'
*/
@JsonProperty(value = "side")
private EdgeNGramTokenFilterSide side;
/**
* Constructor for {@link EdgeNGramTokenFilter}.
*/
/**
* Get the minGram property: The minimum n-gram length. Default is 1. Must
* be less than the value of maxGram.
*
* @return the minGram value.
*/
public Integer getMinGram() {
return this.minGram;
}
/**
* Set the minGram property: The minimum n-gram length. Default is 1. Must
* be less than the value of maxGram.
*
* @param minGram the minGram value to set.
* @return the EdgeNGramTokenFilter object itself.
*/
public EdgeNGramTokenFilter setMinGram(Integer minGram) {
this.minGram = minGram;
return this;
}
/**
* Get the maxGram property: The maximum n-gram length. Default is 2.
*
* @return the maxGram value.
*/
public Integer getMaxGram() {
return this.maxGram;
}
/**
* Set the maxGram property: The maximum n-gram length. Default is 2.
*
* @param maxGram the maxGram value to set.
* @return the EdgeNGramTokenFilter object itself.
*/
public EdgeNGramTokenFilter setMaxGram(Integer maxGram) {
this.maxGram = maxGram;
return this;
}
/**
* Get the side property: Specifies which side of the input the n-gram
* should be generated from. Default is "front". Possible values include:
* 'Front', 'Back'.
*
* @return the side value.
*/
public EdgeNGramTokenFilterSide getSide() {
return this.side;
}
/**
* Set the side property: Specifies which side of the input the n-gram
* should be generated from. Default is "front". Possible values include:
* 'Front', 'Back'.
*
* @param side the side value to set.
* @return the EdgeNGramTokenFilter object itself.
*/
public EdgeNGramTokenFilter setSide(EdgeNGramTokenFilterSide side) {
this.side = side;
return this;
}
} | class EdgeNGramTokenFilter extends TokenFilter {
private static final String V2_ODATA_TYPE = "
@JsonProperty(value = "@odata.type")
private String odataType;
/*
* The minimum n-gram length. Default is 1. Must be less than the value of
* maxGram.
*/
@JsonProperty(value = "minGram")
private Integer minGram;
/*
* The maximum n-gram length. Default is 2.
*/
@JsonProperty(value = "maxGram")
private Integer maxGram;
/*
* Specifies which side of the input the n-gram should be generated from.
* Default is "front". Possible values include: 'Front', 'Back'
*/
@JsonProperty(value = "side")
private EdgeNGramTokenFilterSide side;
/**
* Constructor for {@link EdgeNGramTokenFilter}.
*/
/**
* Get the minGram property: The minimum n-gram length. Default is 1. Must
* be less than the value of maxGram.
*
* @return the minGram value.
*/
public Integer getMinGram() {
return this.minGram;
}
/**
* Set the minGram property: The minimum n-gram length. Default is 1. Must
* be less than the value of maxGram.
*
* @param minGram the minGram value to set.
* @return the EdgeNGramTokenFilter object itself.
*/
public EdgeNGramTokenFilter setMinGram(Integer minGram) {
this.minGram = minGram;
return this;
}
/**
* Get the maxGram property: The maximum n-gram length. Default is 2.
*
* @return the maxGram value.
*/
public Integer getMaxGram() {
return this.maxGram;
}
/**
* Set the maxGram property: The maximum n-gram length. Default is 2.
*
* @param maxGram the maxGram value to set.
* @return the EdgeNGramTokenFilter object itself.
*/
public EdgeNGramTokenFilter setMaxGram(Integer maxGram) {
this.maxGram = maxGram;
return this;
}
/**
* Get the side property: Specifies which side of the input the n-gram
* should be generated from. Default is "front". Possible values include:
* 'Front', 'Back'.
*
* @return the side value.
*/
public EdgeNGramTokenFilterSide getSide() {
return this.side;
}
/**
* Set the side property: Specifies which side of the input the n-gram
* should be generated from. Default is "front". Possible values include:
* 'Front', 'Back'.
*
* @param side the side value to set.
* @return the EdgeNGramTokenFilter object itself.
*/
public EdgeNGramTokenFilter setSide(EdgeNGramTokenFilterSide side) {
this.side = side;
return this;
}
} |
How large can this `Integer` be? According to the service team, we'll eventually start supporting 64-bits integers here (not a concern for Preview 3, I think). | public FieldValue setFormFieldInteger(final Integer formFieldInteger) {
this.formFieldInteger = formFieldInteger;
return this;
} | } | public FieldValue setFormFieldInteger(final Integer formFieldInteger) {
this.formFieldInteger = formFieldInteger;
return this;
} | class FieldValue {
private final FieldValueType type;
private Map<String, FormField> formFieldMap;
private List<FormField> formFieldList;
private Float formFieldFloat;
private Integer formFieldInteger;
private LocalDate formFieldDate;
private LocalTime formFieldTime;
private String formFieldString;
private String formFieldPhoneNumber;
/**
* Constructs a FieldValue object
*
* @param type The type of the field.
*/
public FieldValue(final FieldValueType type) {
this.type = type;
}
/**
* Set the map value of the field.
*
* @param formFieldMap the map value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldMap(final Map<String, FormField> formFieldMap) {
this.formFieldMap = formFieldMap;
return this;
}
/**
* Set the list value of the field.
*
* @param formFieldList the list of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldList(final List<FormField> formFieldList) {
this.formFieldList = formFieldList;
return this;
}
/**
* Set the float value of the field.
*
* @param formFieldFloat the float value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldFloat(final Float formFieldFloat) {
this.formFieldFloat = formFieldFloat;
return this;
}
/**
* Set the integer value of the field.
*
* @param formFieldInteger the integer value of the field.
*
* @return the FieldValue object itself.
*/
/**
* Set the date value of the field.
*
* @param formFieldDate the date value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldDate(final LocalDate formFieldDate) {
this.formFieldDate = formFieldDate;
return this;
}
/**
* Set the time value of the field.
*
* @param formFieldTime the time value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldTime(final LocalTime formFieldTime) {
this.formFieldTime = formFieldTime;
return this;
}
/**
* Set the string value of the field.
*
* @param formFieldString the string value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldString(final String formFieldString) {
this.formFieldString = formFieldString;
return this;
}
/**
* Set the phone number value of the field.
*
* @param formFieldPhoneNumber the phone number value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldPhoneNumber(final String formFieldPhoneNumber) {
this.formFieldPhoneNumber = formFieldPhoneNumber;
return this;
}
/**
* Gets the type of the value of the field.
*
* @return the {@link FieldValueType type} of the field.
*/
public FieldValueType getType() {
return type;
}
/**
* Gets the value of the field as a {@link String}.
*
* @return the value of the field as a {@link String}.
*/
public String asString() {
return this.formFieldString;
}
/**
* Gets the value of the field as a {@link Integer}.
*
* @return the value of the field as a {@link Integer}.
*/
public Integer asInteger() {
return this.formFieldInteger;
}
/**
* Gets the value of the field as a {@link Float}.
*
* @return the value of the field as a {@link Float}.
*/
public Float asFloat() {
return this.formFieldFloat;
}
/**
* Gets the value of the field as a {@link LocalDate}.
*
* @return the value of the field as a {@link LocalDate}.
*/
public LocalDate asDate() {
return this.formFieldDate;
}
/**
* Gets the value of the field as a {@link LocalTime}.
*
* @return the value of the field as a {@link LocalTime}.
*/
public LocalTime asTime() {
return this.formFieldTime;
}
/**
* Gets the value of the field as a phone number.
*
* @return the value of the field as a phone number.
*/
public String asPhoneNumber() {
return this.formFieldPhoneNumber;
}
/**
* Gets the value of the field as a {@link List}.
*
* @return the value of the field as a {@link List}.
*/
public List<FormField> asList() {
return this.formFieldList;
}
/**
* Gets the value of the field as a {@link Map}.
*
* @return the value of the field as a {@link Map}.
*/
public Map<String, FormField> asMap() {
return this.formFieldMap;
}
} | class FieldValue {
private final FieldValueType type;
private Map<String, FormField> formFieldMap;
private List<FormField> formFieldList;
private Float formFieldFloat;
private Integer formFieldInteger;
private LocalDate formFieldDate;
private LocalTime formFieldTime;
private String formFieldString;
private String formFieldPhoneNumber;
/**
* Constructs a FieldValue object
*
* @param type The type of the field.
*/
public FieldValue(final FieldValueType type) {
this.type = type;
}
/**
* Set the map value of the field.
*
* @param formFieldMap the map value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldMap(final Map<String, FormField> formFieldMap) {
this.formFieldMap = formFieldMap;
return this;
}
/**
* Set the list value of the field.
*
* @param formFieldList the list of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldList(final List<FormField> formFieldList) {
this.formFieldList = formFieldList;
return this;
}
/**
* Set the float value of the field.
*
* @param formFieldFloat the float value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldFloat(final Float formFieldFloat) {
this.formFieldFloat = formFieldFloat;
return this;
}
/**
* Set the integer value of the field.
*
* @param formFieldInteger the integer value of the field.
*
* @return the FieldValue object itself.
*/
/**
* Set the date value of the field.
*
* @param formFieldDate the date value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldDate(final LocalDate formFieldDate) {
this.formFieldDate = formFieldDate;
return this;
}
/**
* Set the time value of the field.
*
* @param formFieldTime the time value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldTime(final LocalTime formFieldTime) {
this.formFieldTime = formFieldTime;
return this;
}
/**
* Set the string value of the field.
*
* @param formFieldString the string value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldString(final String formFieldString) {
this.formFieldString = formFieldString;
return this;
}
/**
* Set the phone number value of the field.
*
* @param formFieldPhoneNumber the phone number value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldPhoneNumber(final String formFieldPhoneNumber) {
this.formFieldPhoneNumber = formFieldPhoneNumber;
return this;
}
/**
* Gets the type of the value of the field.
*
* @return the {@link FieldValueType type} of the field.
*/
public FieldValueType getType() {
return type;
}
/**
* Gets the value of the field as a {@link String}.
*
* @return the value of the field as a {@link String}.
*/
public String asString() {
return this.formFieldString;
}
/**
* Gets the value of the field as a {@link Integer}.
*
* @return the value of the field as a {@link Integer}.
*/
public Integer asInteger() {
return this.formFieldInteger;
}
/**
* Gets the value of the field as a {@link Float}.
*
* @return the value of the field as a {@link Float}.
*/
public Float asFloat() {
return this.formFieldFloat;
}
/**
* Gets the value of the field as a {@link LocalDate}.
*
* @return the value of the field as a {@link LocalDate}.
*/
public LocalDate asDate() {
return this.formFieldDate;
}
/**
* Gets the value of the field as a {@link LocalTime}.
*
* @return the value of the field as a {@link LocalTime}.
*/
public LocalTime asTime() {
return this.formFieldTime;
}
/**
* Gets the value of the field as a phone number.
*
* @return the value of the field as a phone number.
*/
public String asPhoneNumber() {
return this.formFieldPhoneNumber;
}
/**
* Gets the value of the field as a {@link List}.
*
* @return the value of the field as a {@link List}.
*/
public List<FormField> asList() {
return this.formFieldList;
}
/**
* Gets the value of the field as a {@link Map}.
*
* @return the value of the field as a {@link Map}.
*/
public Map<String, FormField> asMap() {
return this.formFieldMap;
}
} |
I think that should change the generated code for the service to start sending in a `Long` value so will defer doing this with the service update. | public FieldValue setFormFieldInteger(final Integer formFieldInteger) {
this.formFieldInteger = formFieldInteger;
return this;
} | } | public FieldValue setFormFieldInteger(final Integer formFieldInteger) {
this.formFieldInteger = formFieldInteger;
return this;
} | class FieldValue {
private final FieldValueType type;
private Map<String, FormField> formFieldMap;
private List<FormField> formFieldList;
private Float formFieldFloat;
private Integer formFieldInteger;
private LocalDate formFieldDate;
private LocalTime formFieldTime;
private String formFieldString;
private String formFieldPhoneNumber;
/**
* Constructs a FieldValue object
*
* @param type The type of the field.
*/
public FieldValue(final FieldValueType type) {
this.type = type;
}
/**
* Set the map value of the field.
*
* @param formFieldMap the map value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldMap(final Map<String, FormField> formFieldMap) {
this.formFieldMap = formFieldMap;
return this;
}
/**
* Set the list value of the field.
*
* @param formFieldList the list of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldList(final List<FormField> formFieldList) {
this.formFieldList = formFieldList;
return this;
}
/**
* Set the float value of the field.
*
* @param formFieldFloat the float value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldFloat(final Float formFieldFloat) {
this.formFieldFloat = formFieldFloat;
return this;
}
/**
* Set the integer value of the field.
*
* @param formFieldInteger the integer value of the field.
*
* @return the FieldValue object itself.
*/
/**
* Set the date value of the field.
*
* @param formFieldDate the date value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldDate(final LocalDate formFieldDate) {
this.formFieldDate = formFieldDate;
return this;
}
/**
* Set the time value of the field.
*
* @param formFieldTime the time value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldTime(final LocalTime formFieldTime) {
this.formFieldTime = formFieldTime;
return this;
}
/**
* Set the string value of the field.
*
* @param formFieldString the string value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldString(final String formFieldString) {
this.formFieldString = formFieldString;
return this;
}
/**
* Set the phone number value of the field.
*
* @param formFieldPhoneNumber the phone number value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldPhoneNumber(final String formFieldPhoneNumber) {
this.formFieldPhoneNumber = formFieldPhoneNumber;
return this;
}
/**
* Gets the type of the value of the field.
*
* @return the {@link FieldValueType type} of the field.
*/
public FieldValueType getType() {
return type;
}
/**
* Gets the value of the field as a {@link String}.
*
* @return the value of the field as a {@link String}.
*/
public String asString() {
return this.formFieldString;
}
/**
* Gets the value of the field as a {@link Integer}.
*
* @return the value of the field as a {@link Integer}.
*/
public Integer asInteger() {
return this.formFieldInteger;
}
/**
* Gets the value of the field as a {@link Float}.
*
* @return the value of the field as a {@link Float}.
*/
public Float asFloat() {
return this.formFieldFloat;
}
/**
* Gets the value of the field as a {@link LocalDate}.
*
* @return the value of the field as a {@link LocalDate}.
*/
public LocalDate asDate() {
return this.formFieldDate;
}
/**
* Gets the value of the field as a {@link LocalTime}.
*
* @return the value of the field as a {@link LocalTime}.
*/
public LocalTime asTime() {
return this.formFieldTime;
}
/**
* Gets the value of the field as a phone number.
*
* @return the value of the field as a phone number.
*/
public String asPhoneNumber() {
return this.formFieldPhoneNumber;
}
/**
* Gets the value of the field as a {@link List}.
*
* @return the value of the field as a {@link List}.
*/
public List<FormField> asList() {
return this.formFieldList;
}
/**
* Gets the value of the field as a {@link Map}.
*
* @return the value of the field as a {@link Map}.
*/
public Map<String, FormField> asMap() {
return this.formFieldMap;
}
} | class FieldValue {
private final FieldValueType type;
private Map<String, FormField> formFieldMap;
private List<FormField> formFieldList;
private Float formFieldFloat;
private Integer formFieldInteger;
private LocalDate formFieldDate;
private LocalTime formFieldTime;
private String formFieldString;
private String formFieldPhoneNumber;
/**
* Constructs a FieldValue object
*
* @param type The type of the field.
*/
public FieldValue(final FieldValueType type) {
this.type = type;
}
/**
* Set the map value of the field.
*
* @param formFieldMap the map value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldMap(final Map<String, FormField> formFieldMap) {
this.formFieldMap = formFieldMap;
return this;
}
/**
* Set the list value of the field.
*
* @param formFieldList the list of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldList(final List<FormField> formFieldList) {
this.formFieldList = formFieldList;
return this;
}
/**
* Set the float value of the field.
*
* @param formFieldFloat the float value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldFloat(final Float formFieldFloat) {
this.formFieldFloat = formFieldFloat;
return this;
}
/**
* Set the integer value of the field.
*
* @param formFieldInteger the integer value of the field.
*
* @return the FieldValue object itself.
*/
/**
* Set the date value of the field.
*
* @param formFieldDate the date value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldDate(final LocalDate formFieldDate) {
this.formFieldDate = formFieldDate;
return this;
}
/**
* Set the time value of the field.
*
* @param formFieldTime the time value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldTime(final LocalTime formFieldTime) {
this.formFieldTime = formFieldTime;
return this;
}
/**
* Set the string value of the field.
*
* @param formFieldString the string value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldString(final String formFieldString) {
this.formFieldString = formFieldString;
return this;
}
/**
* Set the phone number value of the field.
*
* @param formFieldPhoneNumber the phone number value of the field.
*
* @return the FieldValue object itself.
*/
public FieldValue setFormFieldPhoneNumber(final String formFieldPhoneNumber) {
this.formFieldPhoneNumber = formFieldPhoneNumber;
return this;
}
/**
* Gets the type of the value of the field.
*
* @return the {@link FieldValueType type} of the field.
*/
public FieldValueType getType() {
return type;
}
/**
* Gets the value of the field as a {@link String}.
*
* @return the value of the field as a {@link String}.
*/
public String asString() {
return this.formFieldString;
}
/**
* Gets the value of the field as a {@link Integer}.
*
* @return the value of the field as a {@link Integer}.
*/
public Integer asInteger() {
return this.formFieldInteger;
}
/**
* Gets the value of the field as a {@link Float}.
*
* @return the value of the field as a {@link Float}.
*/
public Float asFloat() {
return this.formFieldFloat;
}
/**
* Gets the value of the field as a {@link LocalDate}.
*
* @return the value of the field as a {@link LocalDate}.
*/
public LocalDate asDate() {
return this.formFieldDate;
}
/**
* Gets the value of the field as a {@link LocalTime}.
*
* @return the value of the field as a {@link LocalTime}.
*/
public LocalTime asTime() {
return this.formFieldTime;
}
/**
* Gets the value of the field as a phone number.
*
* @return the value of the field as a phone number.
*/
public String asPhoneNumber() {
return this.formFieldPhoneNumber;
}
/**
* Gets the value of the field as a {@link List}.
*
* @return the value of the field as a {@link List}.
*/
public List<FormField> asList() {
return this.formFieldList;
}
/**
* Gets the value of the field as a {@link Map}.
*
* @return the value of the field as a {@link Map}.
*/
public Map<String, FormField> asMap() {
return this.formFieldMap;
}
} |
If the first thread that runs `supplier.get()` throws an error, then all subsequent `getValue()` calls will return an error. Instead, if the first attempt failed, should the next call to `getValue()` again attempt to get from the supplier? | public Mono<T> getValue() {
if (cache != null) {
return Mono.just(cache);
}
return Mono.defer(() -> {
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
} finally {
wip.set(false);
}
}
return emitterProcessor.next();
});
} | sink.error(e); | public Mono<T> getValue() {
return Mono.defer(() -> {
if (cache != null) {
return Mono.just(cache);
}
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
}
}
return replayProcessor.next();
});
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private T cache;
private final ReplayProcessor<T> emitterProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = emitterProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private volatile T cache;
private final ReplayProcessor<T> replayProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = replayProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} |
Yes. In this test, Retry-After is 1sec, default interval is 100ms. | public void lroRetryAfter() {
ServerConfigure configure = new ServerConfigure();
Duration expectedPollingDuration = Duration.ofSeconds(3);
configure.pollingCountTillSuccess = 3;
configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1"));
WireMockServer lroServer = startServer(configure);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
long nanoTime = System.nanoTime();
FooWithProvisioningState result = lroFlux
.doOnNext(response -> {
System.out.println(String.format("[%s] status %s",
OffsetDateTime.now().toString(), response.getStatus().toString()));
}).blockLast()
.getFinalResult().block();
Assertions.assertNotNull(result);
Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime);
Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0);
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
} | Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0); | public void lroRetryAfter() {
ServerConfigure configure = new ServerConfigure();
Duration expectedPollingDuration = Duration.ofSeconds(3);
configure.pollingCountTillSuccess = 3;
configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1"));
WireMockServer lroServer = startServer(configure);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
long nanoTime = System.nanoTime();
FooWithProvisioningState result = lroFlux
.doOnNext(response -> {
System.out.println(String.format("[%s] status %s",
OffsetDateTime.now().toString(), response.getStatus().toString()));
}).blockLast()
.getFinalResult().block();
Assertions.assertNotNull(result);
Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime);
Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0);
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
} | class LROPollerTests {
private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter();
private static final Duration POLLING_DURATION = Duration.ofMillis(100);
@BeforeEach
public void beforeTest() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void afterTest() {
Mockito.framework().clearInlineMocks();
}
@Host("http:
@ServiceInterface(name = "ProvisioningStateLroService")
interface ProvisioningStateLroServiceClient {
@Put("/resource/1")
Mono<Response<Flux<ByteBuffer>>> startLro();
}
@Test
public void lroBasedOnProvisioningState() {
WireMockServer lroServer = startServer();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
int[] onNextCallCount = new int[1];
lroFlux.doOnNext(response -> {
PollResult<FooWithProvisioningState> pollResult = response.getValue();
Assertions.assertNotNull(pollResult);
Assertions.assertNotNull(pollResult.getValue());
onNextCallCount[0]++;
if (onNextCallCount[0] == 1) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.IN_PROGRESS);
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 2) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.IN_PROGRESS);
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 3) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
Assertions.assertNotNull(pollResult.getValue().getResourceId());
} else {
throw new IllegalStateException("Poller emitted more than expected value.");
}
}).blockLast();
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroSucceededNoPoll() {
final String resourceEndpoint = "/resource/1";
final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(200)
.body(sampleVaultUpdateSucceededResponse)
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<Resource>, Resource> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
Resource.class,
Resource.class,
POLLING_DURATION,
newLroInitFunction(client, Resource.class));
StepVerifier.create(lroFlux)
.expectSubscription()
.expectNextMatches(response -> {
PollResult<Resource> pollResult = response.getValue();
return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
&& pollResult != null
&& pollResult.getValue() != null
&& pollResult.getValue().id() != null;
}).verifyComplete();
AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast();
Assertions.assertNotNull(asyncPollResponse);
Resource result = asyncPollResponse.getFinalResult().block();
Assertions.assertNotNull(result);
Assertions.assertNotNull(result.id());
Assertions.assertEquals("v1weidxu", result.name());
Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroTimeout() {
final Duration timeoutDuration = Duration.ofMillis(1000);
final String resourceEndpoint = "/resource/1";
final AtomicInteger getCallCount = new AtomicInteger(0);
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) {
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount.getAndIncrement();
}
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last()
.flatMap(AsyncPollResponse::getFinalResult)
.timeout(timeoutDuration);
StepVerifier.create(resultMonoWithTimeout)
.thenAwait()
.verifyError(TimeoutException.class);
int count = getCallCount.get();
try {
Thread.sleep(timeoutDuration.toMillis());
} catch (InterruptedException e) {
}
Assertions.assertEquals(count, getCallCount.get());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
private static class ServerConfigure {
private int pollingCountTillSuccess = 2;
private HttpHeaders additionalHeaders = HttpHeaders.noHeaders();
}
private static WireMockServer startServer() {
return startServer(new ServerConfigure());
}
private static WireMockServer startServer(ServerConfigure serverConfigure) {
final String resourceEndpoint = "/resource/1";
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
private int[] getCallCount = new int[1];
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount[0]++;
if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
} else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString())))
.build();
}
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
return lroServer;
}
private static WireMockServer createServer(ResponseTransformer transformer,
String... endpoints) {
WireMockServer server = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
for (String endpoint : endpoints) {
server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint))
.willReturn(WireMock.aResponse()));
}
return server;
}
private static HttpPipeline createHttpPipeline(int port) {
return new HttpPipelineBuilder()
.policies(new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context,
HttpPipelineNextPolicy next) {
HttpRequest request = context.getHttpRequest();
request.setUrl(updatePort(request.getUrl(), port));
context.setHttpRequest(request);
return next.process();
}
private URL updatePort(URL url, int port) {
try {
return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
} catch (MalformedURLException mue) {
throw new RuntimeException(mue);
}
}
})
.build();
}
private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client, Type type) {
return client.startLro();
}
private static String toJson(Object object) {
try {
return SERIALIZER.serialize(object, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
private static <T> T fromJson(String json, Type type) {
try {
return SERIALIZER.deserialize(json, type, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
} | class LROPollerTests {
private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter();
private static final Duration POLLING_DURATION = Duration.ofMillis(100);
@BeforeEach
public void beforeTest() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void afterTest() {
Mockito.framework().clearInlineMocks();
}
@Host("http:
@ServiceInterface(name = "ProvisioningStateLroService")
interface ProvisioningStateLroServiceClient {
@Put("/resource/1")
Mono<Response<Flux<ByteBuffer>>> startLro();
}
@Test
public void lroBasedOnProvisioningState() {
WireMockServer lroServer = startServer();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
int[] onNextCallCount = new int[1];
lroFlux.doOnNext(response -> {
PollResult<FooWithProvisioningState> pollResult = response.getValue();
Assertions.assertNotNull(pollResult);
Assertions.assertNotNull(pollResult.getValue());
onNextCallCount[0]++;
if (onNextCallCount[0] == 1) {
Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS,
response.getStatus());
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 2) {
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
response.getStatus());
Assertions.assertNotNull(pollResult.getValue().getResourceId());
} else {
throw new IllegalStateException("Poller emitted more than expected value.");
}
}).blockLast();
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroSucceededNoPoll() {
final String resourceEndpoint = "/resource/1";
final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(200)
.body(sampleVaultUpdateSucceededResponse)
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<Resource>, Resource> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
Resource.class,
Resource.class,
POLLING_DURATION,
newLroInitFunction(client, Resource.class));
StepVerifier.create(lroFlux)
.expectSubscription()
.expectNextMatches(response -> {
PollResult<Resource> pollResult = response.getValue();
return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
&& pollResult != null
&& pollResult.getValue() != null
&& pollResult.getValue().id() != null;
}).verifyComplete();
AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast();
Assertions.assertNotNull(asyncPollResponse);
Resource result = asyncPollResponse.getFinalResult().block();
Assertions.assertNotNull(result);
Assertions.assertNotNull(result.id());
Assertions.assertEquals("v1weidxu", result.name());
Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroTimeout() {
final Duration timeoutDuration = Duration.ofMillis(1000);
final String resourceEndpoint = "/resource/1";
final AtomicInteger getCallCount = new AtomicInteger(0);
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) {
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount.getAndIncrement();
}
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last()
.flatMap(AsyncPollResponse::getFinalResult)
.timeout(timeoutDuration);
StepVerifier.create(resultMonoWithTimeout)
.thenAwait()
.verifyError(TimeoutException.class);
int count = getCallCount.get();
try {
Thread.sleep(timeoutDuration.toMillis());
} catch (InterruptedException e) {
}
Assertions.assertEquals(count, getCallCount.get());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
private static class ServerConfigure {
private int pollingCountTillSuccess = 2;
private HttpHeaders additionalHeaders = HttpHeaders.noHeaders();
}
private static WireMockServer startServer() {
return startServer(new ServerConfigure());
}
private static WireMockServer startServer(ServerConfigure serverConfigure) {
final String resourceEndpoint = "/resource/1";
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
private int[] getCallCount = new int[1];
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
System.out.println(String.format("[%s] PUT status %s",
OffsetDateTime.now().toString(), "IN_PROGRESS"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount[0]++;
if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) {
System.out.println(String.format("[%s] GET status %s",
OffsetDateTime.now().toString(), "IN_PROGRESS"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
} else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) {
System.out.println(String.format("[%s] GET status %s",
OffsetDateTime.now().toString(), "SUCCEEDED"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString())))
.build();
}
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
return lroServer;
}
private static WireMockServer createServer(ResponseTransformer transformer,
String... endpoints) {
WireMockServer server = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
for (String endpoint : endpoints) {
server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint))
.willReturn(WireMock.aResponse()));
}
return server;
}
private static HttpPipeline createHttpPipeline(int port) {
return new HttpPipelineBuilder()
.policies(new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context,
HttpPipelineNextPolicy next) {
HttpRequest request = context.getHttpRequest();
request.setUrl(updatePort(request.getUrl(), port));
context.setHttpRequest(request);
return next.process();
}
private URL updatePort(URL url, int port) {
try {
return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
} catch (MalformedURLException mue) {
throw new RuntimeException(mue);
}
}
})
.build();
}
private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client, Type type) {
return client.startLro();
}
private static String toJson(Object object) {
try {
return SERIALIZER.serialize(object, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
} |
There's still a chance of race condition here when a thread marks `wip` as false and then another thread enters the `if` block and calls `supplier.get()` again updating the reference to `cache`. How critical is the need to call the supplier only once? | public Mono<T> getValue() {
if (cache != null) {
return Mono.just(cache);
}
return Mono.defer(() -> {
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
} finally {
wip.set(false);
}
}
return emitterProcessor.next();
});
} | wip.set(false); | public Mono<T> getValue() {
return Mono.defer(() -> {
if (cache != null) {
return Mono.just(cache);
}
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
}
}
return replayProcessor.next();
});
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private T cache;
private final ReplayProcessor<T> emitterProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = emitterProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private volatile T cache;
private final ReplayProcessor<T> replayProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = replayProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} |
This should also be inside `defer()`. The check should happen when there is a `subscriber`. | public Mono<T> getValue() {
if (cache != null) {
return Mono.just(cache);
}
return Mono.defer(() -> {
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
} finally {
wip.set(false);
}
}
return emitterProcessor.next();
});
} | } | public Mono<T> getValue() {
return Mono.defer(() -> {
if (cache != null) {
return Mono.just(cache);
}
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
}
}
return replayProcessor.next();
});
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private T cache;
private final ReplayProcessor<T> emitterProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = emitterProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private volatile T cache;
private final ReplayProcessor<T> replayProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = replayProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} |
as discussed. The supplier will use the same user config locked in at construction time, so a new instance of Identity client will need to be created to fix the issue with the passed in user configuration. | public Mono<T> getValue() {
if (cache != null) {
return Mono.just(cache);
}
return Mono.defer(() -> {
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
} finally {
wip.set(false);
}
}
return emitterProcessor.next();
});
} | sink.error(e); | public Mono<T> getValue() {
return Mono.defer(() -> {
if (cache != null) {
return Mono.just(cache);
}
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
}
}
return replayProcessor.next();
});
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private T cache;
private final ReplayProcessor<T> emitterProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = emitterProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private volatile T cache;
private final ReplayProcessor<T> replayProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = replayProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} |
moved it to the defer block. | public Mono<T> getValue() {
if (cache != null) {
return Mono.just(cache);
}
return Mono.defer(() -> {
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
} finally {
wip.set(false);
}
}
return emitterProcessor.next();
});
} | wip.set(false); | public Mono<T> getValue() {
return Mono.defer(() -> {
if (cache != null) {
return Mono.just(cache);
}
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
}
}
return replayProcessor.next();
});
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private T cache;
private final ReplayProcessor<T> emitterProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = emitterProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private volatile T cache;
private final ReplayProcessor<T> replayProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = replayProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} |
If the expectation is to create a new `IdentityClient` in case of errors, there's no need to reset `wip` to false. Only one thread ever has to enter the `wip` block and should either get `value` from supplier or throw an error. There's no need for another thread to re-enter this block. | public Mono<T> getValue() {
if (cache != null) {
return Mono.just(cache);
}
return Mono.defer(() -> {
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
} finally {
wip.set(false);
}
}
return emitterProcessor.next();
});
} | wip.set(false); | public Mono<T> getValue() {
return Mono.defer(() -> {
if (cache != null) {
return Mono.just(cache);
}
if (!wip.getAndSet(true)) {
try {
cache = supplier.get();
sink.next(cache);
} catch (Exception e) {
sink.error(e);
}
}
return replayProcessor.next();
});
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private T cache;
private final ReplayProcessor<T> emitterProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = emitterProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} | class SynchronizedAccessor<T> {
private final AtomicBoolean wip;
private volatile T cache;
private final ReplayProcessor<T> replayProcessor = ReplayProcessor.create(1);
private final FluxSink<T> sink = replayProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Supplier<T> supplier;
public SynchronizedAccessor(Supplier<T> supplier) {
this.wip = new AtomicBoolean(false);
this.supplier = supplier;
}
/**
* Get the value from the configured supplier.
*
* @return the output {@code T}
*/
} |
In test case, set `Retry-After` header to different value of default poll interval. | public void lroRetryAfter() {
ServerConfigure configure = new ServerConfigure();
Duration expectedPollingDuration = Duration.ofSeconds(3);
configure.pollingCountTillSuccess = 3;
configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1"));
WireMockServer lroServer = startServer(configure);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
long nanoTime = System.nanoTime();
FooWithProvisioningState result = lroFlux.blockLast().getFinalResult().block();
Assertions.assertNotNull(result);
Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime);
Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0);
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
} | configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1")); | public void lroRetryAfter() {
ServerConfigure configure = new ServerConfigure();
Duration expectedPollingDuration = Duration.ofSeconds(3);
configure.pollingCountTillSuccess = 3;
configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1"));
WireMockServer lroServer = startServer(configure);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
long nanoTime = System.nanoTime();
FooWithProvisioningState result = lroFlux
.doOnNext(response -> {
System.out.println(String.format("[%s] status %s",
OffsetDateTime.now().toString(), response.getStatus().toString()));
}).blockLast()
.getFinalResult().block();
Assertions.assertNotNull(result);
Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime);
Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0);
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
} | class LROPollerTests {
private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter();
private static final Duration POLLING_DURATION = Duration.ofMillis(100);
@BeforeEach
public void beforeTest() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void afterTest() {
Mockito.framework().clearInlineMocks();
}
@Host("http:
@ServiceInterface(name = "ProvisioningStateLroService")
interface ProvisioningStateLroServiceClient {
@Put("/resource/1")
Mono<Response<Flux<ByteBuffer>>> startLro();
}
@Test
public void lroBasedOnProvisioningState() {
WireMockServer lroServer = startServer();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
int[] onNextCallCount = new int[1];
lroFlux.doOnNext(response -> {
PollResult<FooWithProvisioningState> pollResult = response.getValue();
Assertions.assertNotNull(pollResult);
Assertions.assertNotNull(pollResult.getValue());
onNextCallCount[0]++;
if (onNextCallCount[0] == 1) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.IN_PROGRESS);
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 2) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.IN_PROGRESS);
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 3) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
Assertions.assertNotNull(pollResult.getValue().getResourceId());
} else {
throw new IllegalStateException("Poller emitted more than expected value.");
}
}).blockLast();
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroSucceededNoPoll() {
final String resourceEndpoint = "/resource/1";
final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(200)
.body(sampleVaultUpdateSucceededResponse)
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<Resource>, Resource> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
Resource.class,
Resource.class,
POLLING_DURATION,
newLroInitFunction(client, Resource.class));
StepVerifier.create(lroFlux)
.expectSubscription()
.expectNextMatches(response -> {
PollResult<Resource> pollResult = response.getValue();
return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
&& pollResult != null
&& pollResult.getValue() != null
&& pollResult.getValue().id() != null;
}).verifyComplete();
AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast();
Assertions.assertNotNull(asyncPollResponse);
Resource result = asyncPollResponse.getFinalResult().block();
Assertions.assertNotNull(result);
Assertions.assertNotNull(result.id());
Assertions.assertEquals("v1weidxu", result.name());
Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroTimeout() {
final Duration timeoutDuration = Duration.ofMillis(1000);
final String resourceEndpoint = "/resource/1";
final AtomicInteger getCallCount = new AtomicInteger(0);
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) {
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount.getAndIncrement();
}
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last()
.flatMap(AsyncPollResponse::getFinalResult)
.timeout(timeoutDuration);
StepVerifier.create(resultMonoWithTimeout)
.thenAwait()
.verifyError(TimeoutException.class);
int count = getCallCount.get();
try {
Thread.sleep(timeoutDuration.toMillis());
} catch (InterruptedException e) {
}
Assertions.assertEquals(count, getCallCount.get());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
private static class ServerConfigure {
private int pollingCountTillSuccess = 2;
private HttpHeaders additionalHeaders = HttpHeaders.noHeaders();
}
private static WireMockServer startServer() {
return startServer(new ServerConfigure());
}
private static WireMockServer startServer(ServerConfigure serverConfigure) {
final String resourceEndpoint = "/resource/1";
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
private int[] getCallCount = new int[1];
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount[0]++;
if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
} else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString())))
.build();
}
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
return lroServer;
}
private static WireMockServer createServer(ResponseTransformer transformer,
String... endpoints) {
WireMockServer server = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
for (String endpoint : endpoints) {
server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint))
.willReturn(WireMock.aResponse()));
}
return server;
}
private static HttpPipeline createHttpPipeline(int port) {
return new HttpPipelineBuilder()
.policies(new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context,
HttpPipelineNextPolicy next) {
HttpRequest request = context.getHttpRequest();
request.setUrl(updatePort(request.getUrl(), port));
context.setHttpRequest(request);
return next.process();
}
private URL updatePort(URL url, int port) {
try {
return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
} catch (MalformedURLException mue) {
throw new RuntimeException(mue);
}
}
})
.build();
}
private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client, Type type) {
return client.startLro();
}
private static String toJson(Object object) {
try {
return SERIALIZER.serialize(object, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
private static <T> T fromJson(String json, Type type) {
try {
return SERIALIZER.deserialize(json, type, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
} | class LROPollerTests {
private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter();
private static final Duration POLLING_DURATION = Duration.ofMillis(100);
@BeforeEach
public void beforeTest() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void afterTest() {
Mockito.framework().clearInlineMocks();
}
@Host("http:
@ServiceInterface(name = "ProvisioningStateLroService")
interface ProvisioningStateLroServiceClient {
@Put("/resource/1")
Mono<Response<Flux<ByteBuffer>>> startLro();
}
@Test
public void lroBasedOnProvisioningState() {
WireMockServer lroServer = startServer();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
int[] onNextCallCount = new int[1];
lroFlux.doOnNext(response -> {
PollResult<FooWithProvisioningState> pollResult = response.getValue();
Assertions.assertNotNull(pollResult);
Assertions.assertNotNull(pollResult.getValue());
onNextCallCount[0]++;
if (onNextCallCount[0] == 1) {
Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS,
response.getStatus());
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 2) {
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
response.getStatus());
Assertions.assertNotNull(pollResult.getValue().getResourceId());
} else {
throw new IllegalStateException("Poller emitted more than expected value.");
}
}).blockLast();
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroSucceededNoPoll() {
final String resourceEndpoint = "/resource/1";
final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(200)
.body(sampleVaultUpdateSucceededResponse)
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<Resource>, Resource> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
Resource.class,
Resource.class,
POLLING_DURATION,
newLroInitFunction(client, Resource.class));
StepVerifier.create(lroFlux)
.expectSubscription()
.expectNextMatches(response -> {
PollResult<Resource> pollResult = response.getValue();
return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
&& pollResult != null
&& pollResult.getValue() != null
&& pollResult.getValue().id() != null;
}).verifyComplete();
AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast();
Assertions.assertNotNull(asyncPollResponse);
Resource result = asyncPollResponse.getFinalResult().block();
Assertions.assertNotNull(result);
Assertions.assertNotNull(result.id());
Assertions.assertEquals("v1weidxu", result.name());
Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroTimeout() {
final Duration timeoutDuration = Duration.ofMillis(1000);
final String resourceEndpoint = "/resource/1";
final AtomicInteger getCallCount = new AtomicInteger(0);
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) {
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount.getAndIncrement();
}
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last()
.flatMap(AsyncPollResponse::getFinalResult)
.timeout(timeoutDuration);
StepVerifier.create(resultMonoWithTimeout)
.thenAwait()
.verifyError(TimeoutException.class);
int count = getCallCount.get();
try {
Thread.sleep(timeoutDuration.toMillis());
} catch (InterruptedException e) {
}
Assertions.assertEquals(count, getCallCount.get());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
private static class ServerConfigure {
private int pollingCountTillSuccess = 2;
private HttpHeaders additionalHeaders = HttpHeaders.noHeaders();
}
private static WireMockServer startServer() {
return startServer(new ServerConfigure());
}
private static WireMockServer startServer(ServerConfigure serverConfigure) {
final String resourceEndpoint = "/resource/1";
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
private int[] getCallCount = new int[1];
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
System.out.println(String.format("[%s] PUT status %s",
OffsetDateTime.now().toString(), "IN_PROGRESS"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount[0]++;
if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) {
System.out.println(String.format("[%s] GET status %s",
OffsetDateTime.now().toString(), "IN_PROGRESS"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
} else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) {
System.out.println(String.format("[%s] GET status %s",
OffsetDateTime.now().toString(), "SUCCEEDED"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString())))
.build();
}
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
return lroServer;
}
private static WireMockServer createServer(ResponseTransformer transformer,
String... endpoints) {
WireMockServer server = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
for (String endpoint : endpoints) {
server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint))
.willReturn(WireMock.aResponse()));
}
return server;
}
private static HttpPipeline createHttpPipeline(int port) {
return new HttpPipelineBuilder()
.policies(new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context,
HttpPipelineNextPolicy next) {
HttpRequest request = context.getHttpRequest();
request.setUrl(updatePort(request.getUrl(), port));
context.setHttpRequest(request);
return next.process();
}
private URL updatePort(URL url, int port) {
try {
return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
} catch (MalformedURLException mue) {
throw new RuntimeException(mue);
}
}
})
.build();
}
private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client, Type type) {
return client.startLro();
}
private static String toJson(Object object) {
try {
return SERIALIZER.serialize(object, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
} |
If we just check polling duration larger than expected. I think we could make `Retry-After` larger than the original one. | public void lroRetryAfter() {
ServerConfigure configure = new ServerConfigure();
Duration expectedPollingDuration = Duration.ofSeconds(3);
configure.pollingCountTillSuccess = 3;
configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1"));
WireMockServer lroServer = startServer(configure);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
long nanoTime = System.nanoTime();
FooWithProvisioningState result = lroFlux
.doOnNext(response -> {
System.out.println(String.format("[%s] status %s",
OffsetDateTime.now().toString(), response.getStatus().toString()));
}).blockLast()
.getFinalResult().block();
Assertions.assertNotNull(result);
Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime);
Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0);
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
} | Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0); | public void lroRetryAfter() {
ServerConfigure configure = new ServerConfigure();
Duration expectedPollingDuration = Duration.ofSeconds(3);
configure.pollingCountTillSuccess = 3;
configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1"));
WireMockServer lroServer = startServer(configure);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
long nanoTime = System.nanoTime();
FooWithProvisioningState result = lroFlux
.doOnNext(response -> {
System.out.println(String.format("[%s] status %s",
OffsetDateTime.now().toString(), response.getStatus().toString()));
}).blockLast()
.getFinalResult().block();
Assertions.assertNotNull(result);
Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime);
Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0);
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
} | class LROPollerTests {
private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter();
private static final Duration POLLING_DURATION = Duration.ofMillis(100);
@BeforeEach
public void beforeTest() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void afterTest() {
Mockito.framework().clearInlineMocks();
}
@Host("http:
@ServiceInterface(name = "ProvisioningStateLroService")
interface ProvisioningStateLroServiceClient {
@Put("/resource/1")
Mono<Response<Flux<ByteBuffer>>> startLro();
}
@Test
public void lroBasedOnProvisioningState() {
WireMockServer lroServer = startServer();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
int[] onNextCallCount = new int[1];
lroFlux.doOnNext(response -> {
PollResult<FooWithProvisioningState> pollResult = response.getValue();
Assertions.assertNotNull(pollResult);
Assertions.assertNotNull(pollResult.getValue());
onNextCallCount[0]++;
if (onNextCallCount[0] == 1) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.IN_PROGRESS);
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 2) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.IN_PROGRESS);
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 3) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
Assertions.assertNotNull(pollResult.getValue().getResourceId());
} else {
throw new IllegalStateException("Poller emitted more than expected value.");
}
}).blockLast();
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroSucceededNoPoll() {
final String resourceEndpoint = "/resource/1";
final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(200)
.body(sampleVaultUpdateSucceededResponse)
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<Resource>, Resource> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
Resource.class,
Resource.class,
POLLING_DURATION,
newLroInitFunction(client, Resource.class));
StepVerifier.create(lroFlux)
.expectSubscription()
.expectNextMatches(response -> {
PollResult<Resource> pollResult = response.getValue();
return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
&& pollResult != null
&& pollResult.getValue() != null
&& pollResult.getValue().id() != null;
}).verifyComplete();
AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast();
Assertions.assertNotNull(asyncPollResponse);
Resource result = asyncPollResponse.getFinalResult().block();
Assertions.assertNotNull(result);
Assertions.assertNotNull(result.id());
Assertions.assertEquals("v1weidxu", result.name());
Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroTimeout() {
final Duration timeoutDuration = Duration.ofMillis(1000);
final String resourceEndpoint = "/resource/1";
final AtomicInteger getCallCount = new AtomicInteger(0);
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) {
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount.getAndIncrement();
}
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last()
.flatMap(AsyncPollResponse::getFinalResult)
.timeout(timeoutDuration);
StepVerifier.create(resultMonoWithTimeout)
.thenAwait()
.verifyError(TimeoutException.class);
int count = getCallCount.get();
try {
Thread.sleep(timeoutDuration.toMillis());
} catch (InterruptedException e) {
}
Assertions.assertEquals(count, getCallCount.get());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
private static class ServerConfigure {
private int pollingCountTillSuccess = 2;
private HttpHeaders additionalHeaders = HttpHeaders.noHeaders();
}
private static WireMockServer startServer() {
return startServer(new ServerConfigure());
}
private static WireMockServer startServer(ServerConfigure serverConfigure) {
final String resourceEndpoint = "/resource/1";
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
private int[] getCallCount = new int[1];
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount[0]++;
if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
} else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString())))
.build();
}
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
return lroServer;
}
private static WireMockServer createServer(ResponseTransformer transformer,
String... endpoints) {
WireMockServer server = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
for (String endpoint : endpoints) {
server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint))
.willReturn(WireMock.aResponse()));
}
return server;
}
private static HttpPipeline createHttpPipeline(int port) {
return new HttpPipelineBuilder()
.policies(new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context,
HttpPipelineNextPolicy next) {
HttpRequest request = context.getHttpRequest();
request.setUrl(updatePort(request.getUrl(), port));
context.setHttpRequest(request);
return next.process();
}
private URL updatePort(URL url, int port) {
try {
return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
} catch (MalformedURLException mue) {
throw new RuntimeException(mue);
}
}
})
.build();
}
private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client, Type type) {
return client.startLro();
}
private static String toJson(Object object) {
try {
return SERIALIZER.serialize(object, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
private static <T> T fromJson(String json, Type type) {
try {
return SERIALIZER.deserialize(json, type, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
} | class LROPollerTests {
private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter();
private static final Duration POLLING_DURATION = Duration.ofMillis(100);
@BeforeEach
public void beforeTest() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void afterTest() {
Mockito.framework().clearInlineMocks();
}
@Host("http:
@ServiceInterface(name = "ProvisioningStateLroService")
interface ProvisioningStateLroServiceClient {
@Put("/resource/1")
Mono<Response<Flux<ByteBuffer>>> startLro();
}
@Test
public void lroBasedOnProvisioningState() {
WireMockServer lroServer = startServer();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
int[] onNextCallCount = new int[1];
lroFlux.doOnNext(response -> {
PollResult<FooWithProvisioningState> pollResult = response.getValue();
Assertions.assertNotNull(pollResult);
Assertions.assertNotNull(pollResult.getValue());
onNextCallCount[0]++;
if (onNextCallCount[0] == 1) {
Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS,
response.getStatus());
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 2) {
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
response.getStatus());
Assertions.assertNotNull(pollResult.getValue().getResourceId());
} else {
throw new IllegalStateException("Poller emitted more than expected value.");
}
}).blockLast();
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroSucceededNoPoll() {
final String resourceEndpoint = "/resource/1";
final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(200)
.body(sampleVaultUpdateSucceededResponse)
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<Resource>, Resource> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
Resource.class,
Resource.class,
POLLING_DURATION,
newLroInitFunction(client, Resource.class));
StepVerifier.create(lroFlux)
.expectSubscription()
.expectNextMatches(response -> {
PollResult<Resource> pollResult = response.getValue();
return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
&& pollResult != null
&& pollResult.getValue() != null
&& pollResult.getValue().id() != null;
}).verifyComplete();
AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast();
Assertions.assertNotNull(asyncPollResponse);
Resource result = asyncPollResponse.getFinalResult().block();
Assertions.assertNotNull(result);
Assertions.assertNotNull(result.id());
Assertions.assertEquals("v1weidxu", result.name());
Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroTimeout() {
final Duration timeoutDuration = Duration.ofMillis(1000);
final String resourceEndpoint = "/resource/1";
final AtomicInteger getCallCount = new AtomicInteger(0);
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) {
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount.getAndIncrement();
}
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last()
.flatMap(AsyncPollResponse::getFinalResult)
.timeout(timeoutDuration);
StepVerifier.create(resultMonoWithTimeout)
.thenAwait()
.verifyError(TimeoutException.class);
int count = getCallCount.get();
try {
Thread.sleep(timeoutDuration.toMillis());
} catch (InterruptedException e) {
}
Assertions.assertEquals(count, getCallCount.get());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
private static class ServerConfigure {
private int pollingCountTillSuccess = 2;
private HttpHeaders additionalHeaders = HttpHeaders.noHeaders();
}
private static WireMockServer startServer() {
return startServer(new ServerConfigure());
}
private static WireMockServer startServer(ServerConfigure serverConfigure) {
final String resourceEndpoint = "/resource/1";
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
private int[] getCallCount = new int[1];
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
System.out.println(String.format("[%s] PUT status %s",
OffsetDateTime.now().toString(), "IN_PROGRESS"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount[0]++;
if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) {
System.out.println(String.format("[%s] GET status %s",
OffsetDateTime.now().toString(), "IN_PROGRESS"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
} else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) {
System.out.println(String.format("[%s] GET status %s",
OffsetDateTime.now().toString(), "SUCCEEDED"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString())))
.build();
}
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
return lroServer;
}
private static WireMockServer createServer(ResponseTransformer transformer,
String... endpoints) {
WireMockServer server = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
for (String endpoint : endpoints) {
server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint))
.willReturn(WireMock.aResponse()));
}
return server;
}
private static HttpPipeline createHttpPipeline(int port) {
return new HttpPipelineBuilder()
.policies(new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context,
HttpPipelineNextPolicy next) {
HttpRequest request = context.getHttpRequest();
request.setUrl(updatePort(request.getUrl(), port));
context.setHttpRequest(request);
return next.process();
}
private URL updatePort(URL url, int port) {
try {
return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
} catch (MalformedURLException mue) {
throw new RuntimeException(mue);
}
}
})
.build();
}
private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client, Type type) {
return client.startLro();
}
private static String toJson(Object object) {
try {
return SERIALIZER.serialize(object, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
} |
Got it. I had thought it was 30s. | public void lroRetryAfter() {
ServerConfigure configure = new ServerConfigure();
Duration expectedPollingDuration = Duration.ofSeconds(3);
configure.pollingCountTillSuccess = 3;
configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1"));
WireMockServer lroServer = startServer(configure);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
long nanoTime = System.nanoTime();
FooWithProvisioningState result = lroFlux
.doOnNext(response -> {
System.out.println(String.format("[%s] status %s",
OffsetDateTime.now().toString(), response.getStatus().toString()));
}).blockLast()
.getFinalResult().block();
Assertions.assertNotNull(result);
Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime);
Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0);
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
} | Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0); | public void lroRetryAfter() {
ServerConfigure configure = new ServerConfigure();
Duration expectedPollingDuration = Duration.ofSeconds(3);
configure.pollingCountTillSuccess = 3;
configure.additionalHeaders = new HttpHeaders(new HttpHeader("Retry-After", "1"));
WireMockServer lroServer = startServer(configure);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
long nanoTime = System.nanoTime();
FooWithProvisioningState result = lroFlux
.doOnNext(response -> {
System.out.println(String.format("[%s] status %s",
OffsetDateTime.now().toString(), response.getStatus().toString()));
}).blockLast()
.getFinalResult().block();
Assertions.assertNotNull(result);
Duration pollingDuration = Duration.ofNanos(System.nanoTime() - nanoTime);
Assertions.assertTrue(pollingDuration.compareTo(expectedPollingDuration) > 0);
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
} | class LROPollerTests {
private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter();
private static final Duration POLLING_DURATION = Duration.ofMillis(100);
@BeforeEach
public void beforeTest() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void afterTest() {
Mockito.framework().clearInlineMocks();
}
@Host("http:
@ServiceInterface(name = "ProvisioningStateLroService")
interface ProvisioningStateLroServiceClient {
@Put("/resource/1")
Mono<Response<Flux<ByteBuffer>>> startLro();
}
@Test
public void lroBasedOnProvisioningState() {
WireMockServer lroServer = startServer();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
int[] onNextCallCount = new int[1];
lroFlux.doOnNext(response -> {
PollResult<FooWithProvisioningState> pollResult = response.getValue();
Assertions.assertNotNull(pollResult);
Assertions.assertNotNull(pollResult.getValue());
onNextCallCount[0]++;
if (onNextCallCount[0] == 1) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.IN_PROGRESS);
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 2) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.IN_PROGRESS);
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 3) {
Assertions.assertEquals(response.getStatus(),
LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
Assertions.assertNotNull(pollResult.getValue().getResourceId());
} else {
throw new IllegalStateException("Poller emitted more than expected value.");
}
}).blockLast();
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroSucceededNoPoll() {
final String resourceEndpoint = "/resource/1";
final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(200)
.body(sampleVaultUpdateSucceededResponse)
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<Resource>, Resource> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
Resource.class,
Resource.class,
POLLING_DURATION,
newLroInitFunction(client, Resource.class));
StepVerifier.create(lroFlux)
.expectSubscription()
.expectNextMatches(response -> {
PollResult<Resource> pollResult = response.getValue();
return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
&& pollResult != null
&& pollResult.getValue() != null
&& pollResult.getValue().id() != null;
}).verifyComplete();
AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast();
Assertions.assertNotNull(asyncPollResponse);
Resource result = asyncPollResponse.getFinalResult().block();
Assertions.assertNotNull(result);
Assertions.assertNotNull(result.id());
Assertions.assertEquals("v1weidxu", result.name());
Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroTimeout() {
final Duration timeoutDuration = Duration.ofMillis(1000);
final String resourceEndpoint = "/resource/1";
final AtomicInteger getCallCount = new AtomicInteger(0);
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) {
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount.getAndIncrement();
}
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last()
.flatMap(AsyncPollResponse::getFinalResult)
.timeout(timeoutDuration);
StepVerifier.create(resultMonoWithTimeout)
.thenAwait()
.verifyError(TimeoutException.class);
int count = getCallCount.get();
try {
Thread.sleep(timeoutDuration.toMillis());
} catch (InterruptedException e) {
}
Assertions.assertEquals(count, getCallCount.get());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
private static class ServerConfigure {
private int pollingCountTillSuccess = 2;
private HttpHeaders additionalHeaders = HttpHeaders.noHeaders();
}
private static WireMockServer startServer() {
return startServer(new ServerConfigure());
}
private static WireMockServer startServer(ServerConfigure serverConfigure) {
final String resourceEndpoint = "/resource/1";
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
private int[] getCallCount = new int[1];
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount[0]++;
if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
} else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString())))
.build();
}
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
return lroServer;
}
private static WireMockServer createServer(ResponseTransformer transformer,
String... endpoints) {
WireMockServer server = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
for (String endpoint : endpoints) {
server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint))
.willReturn(WireMock.aResponse()));
}
return server;
}
private static HttpPipeline createHttpPipeline(int port) {
return new HttpPipelineBuilder()
.policies(new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context,
HttpPipelineNextPolicy next) {
HttpRequest request = context.getHttpRequest();
request.setUrl(updatePort(request.getUrl(), port));
context.setHttpRequest(request);
return next.process();
}
private URL updatePort(URL url, int port) {
try {
return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
} catch (MalformedURLException mue) {
throw new RuntimeException(mue);
}
}
})
.build();
}
private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client, Type type) {
return client.startLro();
}
private static String toJson(Object object) {
try {
return SERIALIZER.serialize(object, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
private static <T> T fromJson(String json, Type type) {
try {
return SERIALIZER.deserialize(json, type, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
} | class LROPollerTests {
private static final SerializerAdapter SERIALIZER = new AzureJacksonAdapter();
private static final Duration POLLING_DURATION = Duration.ofMillis(100);
@BeforeEach
public void beforeTest() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void afterTest() {
Mockito.framework().clearInlineMocks();
}
@Host("http:
@ServiceInterface(name = "ProvisioningStateLroService")
interface ProvisioningStateLroServiceClient {
@Put("/resource/1")
Mono<Response<Flux<ByteBuffer>>> startLro();
}
@Test
public void lroBasedOnProvisioningState() {
WireMockServer lroServer = startServer();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
int[] onNextCallCount = new int[1];
lroFlux.doOnNext(response -> {
PollResult<FooWithProvisioningState> pollResult = response.getValue();
Assertions.assertNotNull(pollResult);
Assertions.assertNotNull(pollResult.getValue());
onNextCallCount[0]++;
if (onNextCallCount[0] == 1) {
Assertions.assertEquals(LongRunningOperationStatus.IN_PROGRESS,
response.getStatus());
Assertions.assertNull(pollResult.getValue().getResourceId());
} else if (onNextCallCount[0] == 2) {
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
response.getStatus());
Assertions.assertNotNull(pollResult.getValue().getResourceId());
} else {
throw new IllegalStateException("Poller emitted more than expected value.");
}
}).blockLast();
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroSucceededNoPoll() {
final String resourceEndpoint = "/resource/1";
final String sampleVaultUpdateSucceededResponse = "{\"id\":\"/subscriptions/
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(200)
.body(sampleVaultUpdateSucceededResponse)
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<Resource>, Resource> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
Resource.class,
Resource.class,
POLLING_DURATION,
newLroInitFunction(client, Resource.class));
StepVerifier.create(lroFlux)
.expectSubscription()
.expectNextMatches(response -> {
PollResult<Resource> pollResult = response.getValue();
return response.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
&& pollResult != null
&& pollResult.getValue() != null
&& pollResult.getValue().id() != null;
}).verifyComplete();
AsyncPollResponse<PollResult<Resource>, Resource> asyncPollResponse = lroFlux.blockLast();
Assertions.assertNotNull(asyncPollResponse);
Resource result = asyncPollResponse.getFinalResult().block();
Assertions.assertNotNull(result);
Assertions.assertNotNull(result.id());
Assertions.assertEquals("v1weidxu", result.name());
Assertions.assertEquals("Microsoft.KeyVault/vaults", result.type());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
public void lroTimeout() {
final Duration timeoutDuration = Duration.ofMillis(1000);
final String resourceEndpoint = "/resource/1";
final AtomicInteger getCallCount = new AtomicInteger(0);
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT, RequestMethod.GET)) {
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount.getAndIncrement();
}
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
try {
final ProvisioningStateLroServiceClient client = RestProxy.create(ProvisioningStateLroServiceClient.class,
createHttpPipeline(lroServer.port()),
SERIALIZER);
PollerFlux<PollResult<FooWithProvisioningState>, FooWithProvisioningState> lroFlux
= PollerFactory.create(SERIALIZER,
new HttpPipelineBuilder().build(),
FooWithProvisioningState.class,
FooWithProvisioningState.class,
POLLING_DURATION,
newLroInitFunction(client, FooWithProvisioningState.class));
Mono<FooWithProvisioningState> resultMonoWithTimeout = lroFlux.last()
.flatMap(AsyncPollResponse::getFinalResult)
.timeout(timeoutDuration);
StepVerifier.create(resultMonoWithTimeout)
.thenAwait()
.verifyError(TimeoutException.class);
int count = getCallCount.get();
try {
Thread.sleep(timeoutDuration.toMillis());
} catch (InterruptedException e) {
}
Assertions.assertEquals(count, getCallCount.get());
} finally {
if (lroServer.isRunning()) {
lroServer.shutdown();
}
}
}
@Test
private static class ServerConfigure {
private int pollingCountTillSuccess = 2;
private HttpHeaders additionalHeaders = HttpHeaders.noHeaders();
}
private static WireMockServer startServer() {
return startServer(new ServerConfigure());
}
private static WireMockServer startServer(ServerConfigure serverConfigure) {
final String resourceEndpoint = "/resource/1";
ResponseTransformer provisioningStateLroService = new ResponseTransformer() {
private int[] getCallCount = new int[1];
@Override
public com.github.tomakehurst.wiremock.http.Response transform(Request request,
com.github.tomakehurst.wiremock.http.Response response,
FileSource fileSource,
Parameters parameters) {
if (!request.getUrl().endsWith(resourceEndpoint)) {
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.status(500)
.body("Unsupported path:" + request.getUrl())
.build();
}
if (request.getMethod().isOneOf(RequestMethod.PUT)) {
System.out.println(String.format("[%s] PUT status %s",
OffsetDateTime.now().toString(), "IN_PROGRESS"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
}
if (request.getMethod().isOneOf(RequestMethod.GET)) {
getCallCount[0]++;
if (getCallCount[0] < serverConfigure.pollingCountTillSuccess) {
System.out.println(String.format("[%s] GET status %s",
OffsetDateTime.now().toString(), "IN_PROGRESS"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.headers(serverConfigure.additionalHeaders)
.body(toJson(new FooWithProvisioningState("IN_PROGRESS")))
.build();
} else if (getCallCount[0] == serverConfigure.pollingCountTillSuccess) {
System.out.println(String.format("[%s] GET status %s",
OffsetDateTime.now().toString(), "SUCCEEDED"));
return new com.github.tomakehurst.wiremock.http.Response.Builder()
.body(toJson(new FooWithProvisioningState("SUCCEEDED", UUID.randomUUID().toString())))
.build();
}
}
return response;
}
@Override
public String getName() {
return "LroService";
}
};
WireMockServer lroServer = createServer(provisioningStateLroService, resourceEndpoint);
lroServer.start();
return lroServer;
}
private static WireMockServer createServer(ResponseTransformer transformer,
String... endpoints) {
WireMockServer server = new WireMockServer(WireMockConfiguration
.options()
.dynamicPort()
.extensions(transformer)
.disableRequestJournal());
for (String endpoint : endpoints) {
server.stubFor(WireMock.any(WireMock.urlEqualTo(endpoint))
.willReturn(WireMock.aResponse()));
}
return server;
}
private static HttpPipeline createHttpPipeline(int port) {
return new HttpPipelineBuilder()
.policies(new HttpPipelinePolicy() {
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context,
HttpPipelineNextPolicy next) {
HttpRequest request = context.getHttpRequest();
request.setUrl(updatePort(request.getUrl(), port));
context.setHttpRequest(request);
return next.process();
}
private URL updatePort(URL url, int port) {
try {
return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
} catch (MalformedURLException mue) {
throw new RuntimeException(mue);
}
}
})
.build();
}
private Mono<Response<Flux<ByteBuffer>>> newLroInitFunction(ProvisioningStateLroServiceClient client, Type type) {
return client.startLro();
}
private static String toJson(Object object) {
try {
return SERIALIZER.serialize(object, SerializerEncoding.JSON);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
} |
I'm okay with this as it follows what the service will be doing. It is a little concerning though as we are implicitly mutating passed customer value, so we need to make sure this is documented strongly somewhere. | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
format.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateString = format.format(dateValue);
gen.writeString(dateString);
} | SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
String dateString = dateValue.toInstant().atOffset(ZoneOffset.UTC)
.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
gen.writeString(dateString);
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
@Override
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
/**
* Serializes the date value to service accepted iso8601 format with UTC time zone.
*
* @param dateValue The {@link java.util.Date} value.
* @param gen Generator used to output resulting Json content
* @param serializers Provider that can be used to get serializers for serializing Objects value contains, if any.
* @throws IOException Throws exception when the dateValue cannot convert to json content.
*/
@Override
} |
Can we make `format` a static property on the class, it is the same during every call. Check if it is thread safe before making the change though. | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
format.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateString = format.format(dateValue);
gen.writeString(dateString);
} | SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
String dateString = dateValue.toInstant().atOffset(ZoneOffset.UTC)
.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
gen.writeString(dateString);
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
@Override
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
/**
* Serializes the date value to service accepted iso8601 format with UTC time zone.
*
* @param dateValue The {@link java.util.Date} value.
* @param gen Generator used to output resulting Json content
* @param serializers Provider that can be used to get serializers for serializing Objects value contains, if any.
* @throws IOException Throws exception when the dateValue cannot convert to json content.
*/
@Override
} |
Why was the logic here changed to eagerly call the deserializer? | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | Object obj = defaultDeserializer.deserialize(jp, ctxt); | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").parse((String) obj);
} catch (ParseException e) {
}
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
private static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat(ISO8601_FORMAT).parse((String) obj);
} catch (ParseException e) {
return obj;
}
}
} |
Why was this logic flipped? | public static void configureMapper(ObjectMapper mapper) {
mapper.registerModule(new JavaTimeModule());
mapper.disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE);
UntypedObjectDeserializer defaultDeserializer = new UntypedObjectDeserializer(null, null);
GeoPointDeserializer geoPointDeserializer = new GeoPointDeserializer(defaultDeserializer);
Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer);
SimpleModule module = new SimpleModule();
module.addDeserializer(Object.class, iso8601DateDeserializer);
mapper.registerModule(Iso8601DateSerializer.getModule());
mapper.registerModule(module);
} | Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer); | public static void configureMapper(ObjectMapper mapper) {
mapper.registerModule(new JavaTimeModule());
mapper.disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE);
UntypedObjectDeserializer defaultDeserializer = new UntypedObjectDeserializer(null, null);
GeoPointDeserializer geoPointDeserializer = new GeoPointDeserializer(defaultDeserializer);
Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer);
SimpleModule module = new SimpleModule();
module.addDeserializer(Object.class, iso8601DateDeserializer);
mapper.registerModule(Iso8601DateSerializer.getModule());
mapper.registerModule(module);
} | class SerializationUtil {
/**
* Configures an {@link ObjectMapper} with custom behavior needed to work with the Azure Cognitive Search REST API.
*
* @param mapper the mapper to be configured
*/
} | class SerializationUtil {
/**
* Configures an {@link ObjectMapper} with custom behavior needed to work with the Azure Cognitive Search REST API.
*
* @param mapper the mapper to be configured
*/
} |
I'm a little confused on the decimal to String conversion, is there any reason we can't use `%+02f` anymore in the `String.format` call? Are we looking to get additional units of precision in the decimal part? | public String toString() {
if (isValid()) {
String longitude = ("" + coordinates.get(0)).contains(".")
? "" + coordinates.get(0) : "" + coordinates.get(0) + ".0";
String latitude = ("" + coordinates.get(1)).contains(".")
? "" + coordinates.get(1) : "" + coordinates.get(1) + ".0";
return String.format(
Locale.US,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem.toString());
}
return "";
} | String longitude = ("" + coordinates.get(0)).contains(".") | public String toString() {
if (isValid()) {
String longitude = Double.toString(coordinates.get(0));
String latitude = Double.toString(coordinates.get(1));
return String.format(
Locale.ROOT,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem);
}
return "";
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) != null && coordinates.get(1) != null
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} |
Don't need to call `toString` here as the formatting function should implicitly do that. | public String toString() {
if (isValid()) {
String longitude = ("" + coordinates.get(0)).contains(".")
? "" + coordinates.get(0) : "" + coordinates.get(0) + ".0";
String latitude = ("" + coordinates.get(1)).contains(".")
? "" + coordinates.get(1) : "" + coordinates.get(1) + ".0";
return String.format(
Locale.US,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem.toString());
}
return "";
} | coordinateSystem.toString()); | public String toString() {
if (isValid()) {
String longitude = Double.toString(coordinates.get(0));
String latitude = Double.toString(coordinates.get(1));
return String.format(
Locale.ROOT,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem);
}
return "";
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) != null && coordinates.get(1) != null
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} |
Sure. Will add JavaDoc. | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
format.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateString = format.format(dateValue);
gen.writeString(dateString);
} | SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
String dateString = dateValue.toInstant().atOffset(ZoneOffset.UTC)
.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
gen.writeString(dateString);
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
@Override
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
/**
* Serializes the date value to service accepted iso8601 format with UTC time zone.
*
* @param dateValue The {@link java.util.Date} value.
* @param gen Generator used to output resulting Json content
* @param serializers Provider that can be used to get serializers for serializing Objects value contains, if any.
* @throws IOException Throws exception when the dateValue cannot convert to json content.
*/
@Override
} |
SimpleDateFormat is not thread safe. It will trigger the spotbugs https://stackoverflow.com/questions/6840803/why-is-javas-simpledateformat-not-thread-safe | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
format.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateString = format.format(dateValue);
gen.writeString(dateString);
} | SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
String dateString = dateValue.toInstant().atOffset(ZoneOffset.UTC)
.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
gen.writeString(dateString);
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
@Override
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
/**
* Serializes the date value to service accepted iso8601 format with UTC time zone.
*
* @param dateValue The {@link java.util.Date} value.
* @param gen Generator used to output resulting Json content
* @param serializers Provider that can be used to get serializers for serializing Objects value contains, if any.
* @throws IOException Throws exception when the dateValue cannot convert to json content.
*/
@Override
} |
The order does not make any differences. | public static void configureMapper(ObjectMapper mapper) {
mapper.registerModule(new JavaTimeModule());
mapper.disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE);
UntypedObjectDeserializer defaultDeserializer = new UntypedObjectDeserializer(null, null);
GeoPointDeserializer geoPointDeserializer = new GeoPointDeserializer(defaultDeserializer);
Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer);
SimpleModule module = new SimpleModule();
module.addDeserializer(Object.class, iso8601DateDeserializer);
mapper.registerModule(Iso8601DateSerializer.getModule());
mapper.registerModule(module);
} | Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer); | public static void configureMapper(ObjectMapper mapper) {
mapper.registerModule(new JavaTimeModule());
mapper.disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE);
UntypedObjectDeserializer defaultDeserializer = new UntypedObjectDeserializer(null, null);
GeoPointDeserializer geoPointDeserializer = new GeoPointDeserializer(defaultDeserializer);
Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer);
SimpleModule module = new SimpleModule();
module.addDeserializer(Object.class, iso8601DateDeserializer);
mapper.registerModule(Iso8601DateSerializer.getModule());
mapper.registerModule(module);
} | class SerializationUtil {
/**
* Configures an {@link ObjectMapper} with custom behavior needed to work with the Azure Cognitive Search REST API.
*
* @param mapper the mapper to be configured
*/
} | class SerializationUtil {
/**
* Configures an {@link ObjectMapper} with custom behavior needed to work with the Azure Cognitive Search REST API.
*
* @param mapper the mapper to be configured
*/
} |
This date format is used in multiple places. Consider making this a string constant. | private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").parse((String) obj);
} catch (ParseException e) {
}
return obj;
} | return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").parse((String) obj); | private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat(ISO8601_FORMAT).parse((String) obj);
} catch (ParseException e) {
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
private static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
}
} |
Return the `obj` here instead of having empty `catch` block. | private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").parse((String) obj);
} catch (ParseException e) {
}
return obj;
} | private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat(ISO8601_FORMAT).parse((String) obj);
} catch (ParseException e) {
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
private static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
}
} | |
Since `Date` is in UTC, the mutation is not resulting in data-loss. So, it's okay to set the timezone to UTC and format the string. | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
format.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateString = format.format(dateValue);
gen.writeString(dateString);
} | SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
String dateString = dateValue.toInstant().atOffset(ZoneOffset.UTC)
.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
gen.writeString(dateString);
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
@Override
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
/**
* Serializes the date value to service accepted iso8601 format with UTC time zone.
*
* @param dateValue The {@link java.util.Date} value.
* @param gen Generator used to output resulting Json content
* @param serializers Provider that can be used to get serializers for serializing Objects value contains, if any.
* @throws IOException Throws exception when the dateValue cannot convert to json content.
*/
@Override
} |
Is this required? `module`, which has the date deserializer, is registered to the `mapper` in the next line. | public static void configureMapper(ObjectMapper mapper) {
mapper.registerModule(new JavaTimeModule());
mapper.disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE);
UntypedObjectDeserializer defaultDeserializer = new UntypedObjectDeserializer(null, null);
GeoPointDeserializer geoPointDeserializer = new GeoPointDeserializer(defaultDeserializer);
Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer);
SimpleModule module = new SimpleModule();
module.addDeserializer(Object.class, iso8601DateDeserializer);
mapper.registerModule(Iso8601DateSerializer.getModule());
mapper.registerModule(module);
} | mapper.registerModule(Iso8601DateSerializer.getModule()); | public static void configureMapper(ObjectMapper mapper) {
mapper.registerModule(new JavaTimeModule());
mapper.disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE);
UntypedObjectDeserializer defaultDeserializer = new UntypedObjectDeserializer(null, null);
GeoPointDeserializer geoPointDeserializer = new GeoPointDeserializer(defaultDeserializer);
Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer);
SimpleModule module = new SimpleModule();
module.addDeserializer(Object.class, iso8601DateDeserializer);
mapper.registerModule(Iso8601DateSerializer.getModule());
mapper.registerModule(module);
} | class SerializationUtil {
/**
* Configures an {@link ObjectMapper} with custom behavior needed to work with the Azure Cognitive Search REST API.
*
* @param mapper the mapper to be configured
*/
} | class SerializationUtil {
/**
* Configures an {@link ObjectMapper} with custom behavior needed to work with the Azure Cognitive Search REST API.
*
* @param mapper the mapper to be configured
*/
} |
Set the locale to `Locale.ROOT`. | public String toString() {
if (isValid()) {
String longitude = ("" + coordinates.get(0)).contains(".")
? "" + coordinates.get(0) : "" + coordinates.get(0) + ".0";
String latitude = ("" + coordinates.get(1)).contains(".")
? "" + coordinates.get(1) : "" + coordinates.get(1) + ".0";
return String.format(
Locale.US,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem.toString());
}
return "";
} | Locale.US, | public String toString() {
if (isValid()) {
String longitude = Double.toString(coordinates.get(0));
String latitude = Double.toString(coordinates.get(1));
return String.format(
Locale.ROOT,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem);
}
return "";
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) != null && coordinates.get(1) != null
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} |
`isValid` should also check that `coordinates.get(0)` and `coordinates.get(1)` are not null? | public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
} | && coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0 | public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) != null && coordinates.get(1) != null
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
public String toString() {
if (isValid()) {
String longitude = ("" + coordinates.get(0)).contains(".")
? "" + coordinates.get(0) : "" + coordinates.get(0) + ".0";
String latitude = ("" + coordinates.get(1)).contains(".")
? "" + coordinates.get(1) : "" + coordinates.get(1) + ".0";
return String.format(
Locale.US,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem.toString());
}
return "";
}
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
public String toString() {
if (isValid()) {
String longitude = Double.toString(coordinates.get(0));
String latitude = Double.toString(coordinates.get(1));
return String.format(
Locale.ROOT,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem);
}
return "";
}
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} |
`Double.toString(value)` will append `.0`. Don't have to do this explicitly. ```suggestion String latitude = Double.toString(coordinates.get(1)); ``` | public String toString() {
if (isValid()) {
String longitude = ("" + coordinates.get(0)).contains(".")
? "" + coordinates.get(0) : "" + coordinates.get(0) + ".0";
String latitude = ("" + coordinates.get(1)).contains(".")
? "" + coordinates.get(1) : "" + coordinates.get(1) + ".0";
return String.format(
Locale.US,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem.toString());
}
return "";
} | ? "" + coordinates.get(1) : "" + coordinates.get(1) + ".0"; | public String toString() {
if (isValid()) {
String longitude = Double.toString(coordinates.get(0));
String latitude = Double.toString(coordinates.get(1));
return String.format(
Locale.ROOT,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem);
}
return "";
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) != null && coordinates.get(1) != null
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} |
Here we register both serializer and deserializer for java.utl.Date type | public static void configureMapper(ObjectMapper mapper) {
mapper.registerModule(new JavaTimeModule());
mapper.disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE);
UntypedObjectDeserializer defaultDeserializer = new UntypedObjectDeserializer(null, null);
GeoPointDeserializer geoPointDeserializer = new GeoPointDeserializer(defaultDeserializer);
Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer);
SimpleModule module = new SimpleModule();
module.addDeserializer(Object.class, iso8601DateDeserializer);
mapper.registerModule(Iso8601DateSerializer.getModule());
mapper.registerModule(module);
} | mapper.registerModule(Iso8601DateSerializer.getModule()); | public static void configureMapper(ObjectMapper mapper) {
mapper.registerModule(new JavaTimeModule());
mapper.disable(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE);
UntypedObjectDeserializer defaultDeserializer = new UntypedObjectDeserializer(null, null);
GeoPointDeserializer geoPointDeserializer = new GeoPointDeserializer(defaultDeserializer);
Iso8601DateDeserializer iso8601DateDeserializer = new Iso8601DateDeserializer(geoPointDeserializer);
SimpleModule module = new SimpleModule();
module.addDeserializer(Object.class, iso8601DateDeserializer);
mapper.registerModule(Iso8601DateSerializer.getModule());
mapper.registerModule(module);
} | class SerializationUtil {
/**
* Configures an {@link ObjectMapper} with custom behavior needed to work with the Azure Cognitive Search REST API.
*
* @param mapper the mapper to be configured
*/
} | class SerializationUtil {
/**
* Configures an {@link ObjectMapper} with custom behavior needed to work with the Azure Cognitive Search REST API.
*
* @param mapper the mapper to be configured
*/
} |
I am trying to replicate the format service return. | public String toString() {
if (isValid()) {
String longitude = ("" + coordinates.get(0)).contains(".")
? "" + coordinates.get(0) : "" + coordinates.get(0) + ".0";
String latitude = ("" + coordinates.get(1)).contains(".")
? "" + coordinates.get(1) : "" + coordinates.get(1) + ".0";
return String.format(
Locale.US,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem.toString());
}
return "";
} | String longitude = ("" + coordinates.get(0)).contains(".") | public String toString() {
if (isValid()) {
String longitude = Double.toString(coordinates.get(0));
String latitude = Double.toString(coordinates.get(1));
return String.format(
Locale.ROOT,
"{type=Point, coordinates=[%s, %s], crs={%s}}", "" + longitude, latitude,
coordinateSystem);
}
return "";
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} | class GeoPoint {
private static final String POINT = "Point";
@JsonProperty
private String type;
@JsonProperty
private List<Double> coordinates;
@JsonProperty("crs")
private CoordinateSystem coordinateSystem;
private GeoPoint() {
this.coordinateSystem = CoordinateSystem.create();
this.type = POINT;
}
/**
* Retrieve GeoPoint type
* @return String type
*/
@JsonProperty
public String getType() {
return type;
}
/**
* Create GeoPoint object from latitude and longitude
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude) {
return new GeoPoint().setCoordinates(Arrays.asList(longitude, latitude));
}
/**
* Create GeoPoint object from latitude, longitude and coordinate system
* @param latitude latitude value of the GeographyPoint
* @param longitude longitude value of the GeographyPoint
* @param coordinateSystem EPSG:4326 coordination system
* @return Add desc
*/
public static GeoPoint create(double latitude, double longitude, CoordinateSystem coordinateSystem) {
return create(latitude, longitude).setCoordinateSystem(coordinateSystem);
}
/**
* Ensures that the GeoPoint values are valid for the Geography Point type in Azure Cognitive Search service.
*
* @return true if valid, false if invalid
*/
@JsonIgnore
public boolean isValid() {
return coordinates != null && coordinates.size() == 2
&& coordinates.get(0) != null && coordinates.get(1) != null
&& coordinates.get(0) >= -180.0 && coordinates.get(0) <= 180.0
&& coordinates.get(1) >= -90.0 && coordinates.get(1) <= 90.0
&& (coordinateSystem == null || coordinateSystem.isValid());
}
/**
* Checks equality between two Geo Points
* @param o other GeoPoint
* @return true if equal
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GeoPoint other = (GeoPoint) o;
if (!this.isValid() || !other.isValid()) {
return false;
}
return Objects.equals(coordinates.get(0), other.coordinates.get(0))
&& Objects.equals(coordinates.get(1), other.coordinates.get(1))
&& Objects.equals(coordinateSystem, other.coordinateSystem);
}
/**
* Returns hash code for Geo Point
* @return int representing hash code
*/
@Override
public int hashCode() {
return Objects.hash(coordinates, coordinateSystem);
}
/**
* Retrieve GeoPoint string representation
* @return String
*/
@Override
/**
* Return latitude
* @return value of latitude coordinate
*/
@JsonIgnore
public double getLatitude() {
return coordinates.get(1);
}
/**
* Return longitude
* @return value of longitude coordinate
*/
@JsonIgnore
public double getLongitude() {
return coordinates.get(0);
}
/**
* Set coordinates
* @param coordinates list of coordinates
* @return GeoPoint updated
*/
public GeoPoint setCoordinates(List<Double> coordinates) {
this.coordinates = coordinates;
return this;
}
/**
* Retrieve GeoPoint CoordinateSystem
* @return CoordinateSystem
*/
public CoordinateSystem getCoordinateSystem() {
return coordinateSystem;
}
/**
* Set CoordinateSystem
* @param coordinateSystem CoordinateSystem
* @return GeoPoint updated
*/
public GeoPoint setCoordinateSystem(CoordinateSystem coordinateSystem) {
this.coordinateSystem = coordinateSystem;
return this;
}
} |
Should this be a common class in azure-core? Iso8601 seems like a common format to deserialize? | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | Object obj = defaultDeserializer.deserialize(jp, ctxt); | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").parse((String) obj);
} catch (ParseException e) {
}
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
private static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat(ISO8601_FORMAT).parse((String) obj);
} catch (ParseException e) {
return obj;
}
}
} |
DateTimeSerializer is supposed to deserialize and deserialize this format.. should the fix be in there? https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/core/azure-core/src/main/java/com/azure/core/util/serializer/DateTimeSerializer.java#L18 | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | Object obj = defaultDeserializer.deserialize(jp, ctxt); | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").parse((String) obj);
} catch (ParseException e) {
}
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
private static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat(ISO8601_FORMAT).parse((String) obj);
} catch (ParseException e) {
return obj;
}
}
} |
Search allows to upload user-defined documents with any date classes. The link you have is to support Joda time (OffsetDateTime, LocalDateTime etc). What we are trying to do here is to support old date lib (java.util.Date) We agreed to put serializer to search only, since Search has specific format and time zone requirement. | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | Object obj = defaultDeserializer.deserialize(jp, ctxt); | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").parse((String) obj);
} catch (ParseException e) {
}
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
private static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat(ISO8601_FORMAT).parse((String) obj);
} catch (ParseException e) {
return obj;
}
}
} |
This is needed for every block. Just tried to minimize the code with local var. | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | Object obj = defaultDeserializer.deserialize(jp, ctxt); | public Object deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
Object obj = defaultDeserializer.deserialize(jp, ctxt);
if (jp.currentTokenId() == JsonTokenId.ID_START_OBJECT) {
return parseDateType(obj);
} else if (jp.currentTokenId() == JsonTokenId.ID_START_ARRAY) {
List<?> list = (List) obj;
return list.stream()
.map(this::parseDateType)
.collect(Collectors.toList());
} else {
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").parse((String) obj);
} catch (ParseException e) {
}
return obj;
}
} | class Iso8601DateDeserializer extends UntypedObjectDeserializer {
private static final long serialVersionUID = 1L;
private final UntypedObjectDeserializer defaultDeserializer;
private static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
protected Iso8601DateDeserializer(final UntypedObjectDeserializer defaultDeserializer) {
super(null, null);
this.defaultDeserializer = defaultDeserializer;
}
@Override
private Object parseDateType(Object obj) {
try {
return new SimpleDateFormat(ISO8601_FORMAT).parse((String) obj);
} catch (ParseException e) {
return obj;
}
}
} |
SimpleDateFormat has some shortage of converting am/pm. I convert the date value to offsetDateTime to guarantee the accuracy. | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
format.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateString = format.format(dateValue);
gen.writeString(dateString);
} | SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
String dateString = dateValue.toInstant().atOffset(ZoneOffset.UTC)
.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
gen.writeString(dateString);
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
@Override
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
/**
* Serializes the date value to service accepted iso8601 format with UTC time zone.
*
* @param dateValue The {@link java.util.Date} value.
* @param gen Generator used to output resulting Json content
* @param serializers Provider that can be used to get serializers for serializing Objects value contains, if any.
* @throws IOException Throws exception when the dateValue cannot convert to json content.
*/
@Override
} |
`Date` is assumed UTC but is that a contractual agreement in the model? Either way this is the best option we have in supporting it so I'm good with it. | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
format.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateString = format.format(dateValue);
gen.writeString(dateString);
} | SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); | public void serialize(Date dateValue, JsonGenerator gen, SerializerProvider serializers) throws IOException {
String dateString = dateValue.toInstant().atOffset(ZoneOffset.UTC)
.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
gen.writeString(dateString);
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
@Override
} | class Iso8601DateSerializer extends JsonSerializer<Date> {
/**
* Gets a module wrapping this serializer as an adapter for the Jackson
* ObjectMapper.
*
* @return a simple module to be plugged onto Jackson ObjectMapper.
*/
public static SimpleModule getModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Date.class, new Iso8601DateSerializer());
return module;
}
/**
* Serializes the date value to service accepted iso8601 format with UTC time zone.
*
* @param dateValue The {@link java.util.Date} value.
* @param gen Generator used to output resulting Json content
* @param serializers Provider that can be used to get serializers for serializing Objects value contains, if any.
* @throws IOException Throws exception when the dateValue cannot convert to json content.
*/
@Override
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.