comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
added | public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
if (slf4jLogger == null) {
defaultLogger.log(logLevel, message, null);
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
defaultLogger.log(WARNING, "Failed to log message, SLF4J logging will be disabled.", e);
slf4jLogger = null;
}
} | defaultLogger.log(WARNING, "Failed to log message, SLF4J logging will be disabled.", e); | public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
defaultLogger.log(logLevel, message, null);
Object slf4jLoggerCopy = this.slf4jLogger;
if (slf4jLoggerCopy == null) {
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to log message with SLF4J", e);
slf4jLogger = null;
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
MethodType getLoggerMethodType = MethodType.methodType(loggerClass, String.class);
getLoggerMethodHandle = lookup.findStatic(loggerFactoryClass, "getLogger", getLoggerMethodType);
MethodType logMethodType = MethodType.methodType(void.class, String.class, Throwable.class);
logVerboseMethodHandle = lookup.findVirtual(loggerClass, "debug", logMethodType);
logInfoMethodHandle = lookup.findVirtual(loggerClass, "info", logMethodType);
logWarnMethodHandle = lookup.findVirtual(loggerClass, "warn", logMethodType);
logErrorMethodHandle = lookup.findVirtual(loggerClass, "error", logMethodType);
MethodType isEnabledMethodType = MethodType.methodType(boolean.class);
isVerboseEnabledMethodHandle = lookup.findVirtual(loggerClass, "isDebugEnabled", isEnabledMethodType);
isInfoEnabledMethodHandle = lookup.findVirtual(loggerClass, "isInfoEnabled", isEnabledMethodType);
isWarnEnabledMethodHandle = lookup.findVirtual(loggerClass, "isWarnEnabled", isEnabledMethodType);
isErrorEnabledMethodHandle = lookup.findVirtual(loggerClass, "isErrorEnabled", isEnabledMethodType);
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
isVerboseEnabled = isSlf4jEnabledAtLevel(slf4jLogger, VERBOSE) || defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = isSlf4jEnabledAtLevel(slf4jLogger, INFORMATIONAL) || defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = isSlf4jEnabledAtLevel(slf4jLogger, WARNING) || defaultLogger.isEnabled(WARNING);
isErrorEnabled = isSlf4jEnabledAtLevel(slf4jLogger, ERROR) || defaultLogger.isEnabled(ERROR);
} catch (ReflectiveOperationException e) {
slf4jLogger = null;
DEFAULT_LOGGER.log(WARNING, "Failed to initialize Slf4jLoggerShim.", e);
isVerboseEnabled = defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = defaultLogger.isEnabled(WARNING);
isErrorEnabled = defaultLogger.isEnabled(ERROR);
}
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
DEFAULT_LOGGER.log(VERBOSE, "Resolved NOPLogger, SLF4J logging will be disabled.", null);
return null;
}
return logger;
} catch (Throwable e) {
DEFAULT_LOGGER.log(WARNING, "Failed to create SLF4J logger, SLF4J logging will be disabled.", e);
return null;
}
}
private static boolean isSlf4jEnabledAtLevel(Object logger, ClientLogger.LogLevel logLevel)
throws ReflectiveOperationException {
if (logger == null) {
return false;
}
try {
switch (logLevel) {
case VERBOSE:
return (boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(logger);
case INFORMATIONAL:
return (boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(logger);
case WARNING:
return (boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(logger);
case ERROR:
return (boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(logger);
default:
return false;
}
} catch (Throwable e) {
DEFAULT_LOGGER.log(WARNING, "Failed to check if log level is enabled, SLF4J logging will be disabled.", e);
return false;
}
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getLoggerMethodHandle = lookup.unreflect(loggerFactoryClass.getMethod("getLogger", String.class));
logVerboseMethodHandle = lookup.unreflect(loggerClass.getMethod("debug", String.class, Throwable.class));
logInfoMethodHandle = lookup.unreflect(loggerClass.getMethod("info", String.class, Throwable.class));
logWarnMethodHandle = lookup.unreflect(loggerClass.getMethod("warn", String.class, Throwable.class));
logErrorMethodHandle = lookup.unreflect(loggerClass.getMethod("error", String.class, Throwable.class));
isVerboseEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isDebugEnabled"));
isInfoEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isInfoEnabled"));
isWarnEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isWarnEnabled"));
isErrorEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isErrorEnabled"));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
public Slf4jLoggerShim(Class<?> clazz) {
this(clazz.getName(), new DefaultLogger(clazz));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
if (slf4jLogger != null) {
isVerboseEnabled |= (Boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, VERBOSE);
isInfoEnabled |= (Boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, INFORMATIONAL);
isWarnEnabled |= (Boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, WARNING);
isErrorEnabled |= (Boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, ERROR);
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to check if SLF4J log level is enabled", e);
slf4jLogger = null;
}
isVerboseEnabled |= defaultLogger.isEnabled(VERBOSE);
isInfoEnabled |= defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled |= defaultLogger.isEnabled(WARNING);
isErrorEnabled |= defaultLogger.isEnabled(ERROR);
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
writeSlf4jDisabledError(VERBOSE, "Resolved NOPLogger", null);
return null;
}
return logger;
} catch (Throwable e) {
writeSlf4jDisabledError(WARNING, "Failed to create SLF4J logger", e);
return null;
}
}
private static void writeSlf4jDisabledError(ClientLogger.LogLevel level, String message, Throwable throwable) {
DEFAULT_LOGGER.log(level, String.format("[DefaultLogger]: %s. SLF4J logging will be disabled.", message),
throwable);
}
} |
Do we want to pre-check if the `QueryInfo` payload has `hasNonStreamingOrderBy` property? | public boolean hasNonStreamingOrderBy() {
this.nonStreamingOrderBy = Boolean.TRUE.equals(super.getBoolean("hasNonStreamingOrderBy"));
return this.nonStreamingOrderBy;
} | this.nonStreamingOrderBy = Boolean.TRUE.equals(super.getBoolean("hasNonStreamingOrderBy")); | public boolean hasNonStreamingOrderBy() {
this.nonStreamingOrderBy = Boolean.TRUE.equals(super.getBoolean("hasNonStreamingOrderBy"));
return this.nonStreamingOrderBy;
} | class QueryInfo extends JsonSerializable {
public static final QueryInfo EMPTY = new QueryInfo();
private static final String HAS_SELECT_VALUE = "hasSelectValue";
private Integer top;
private List<SortOrder> orderBy;
private Collection<AggregateOperator> aggregates;
private Collection<String> orderByExpressions;
private String rewrittenQuery;
private Integer offset;
private Integer limit;
private DistinctQueryType distinctQueryType;
private QueryPlanDiagnosticsContext queryPlanDiagnosticsContext;
private DCountInfo dCountInfo;
private boolean nonStreamingOrderBy;
public QueryInfo() {
}
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
public QueryInfo(ObjectNode objectNode) {
super(objectNode);
}
public QueryInfo(String jsonString) {
super(jsonString);
}
public Integer getTop() {
return this.top != null ? this.top : (this.top = super.getInt("top"));
}
public List<SortOrder> getOrderBy() {
return this.orderBy != null ? this.orderBy : (this.orderBy = super.getList("orderBy", SortOrder.class));
}
public String getRewrittenQuery() {
return this.rewrittenQuery != null ? this.rewrittenQuery
: (this.rewrittenQuery = super.getString("rewrittenQuery"));
}
public boolean hasTop() {
return this.getTop() != null;
}
public boolean hasOrderBy() {
Collection<SortOrder> orderBy = this.getOrderBy();
return orderBy != null && orderBy.size() > 0;
}
public boolean hasRewrittenQuery() {
return !StringUtils.isEmpty(this.getRewrittenQuery());
}
public boolean hasAggregates() {
Collection<AggregateOperator> aggregates = this.getAggregates();
boolean hasAggregates = aggregates != null && aggregates.size() > 0;
if (hasAggregates) {
return hasAggregates;
}
boolean aggregateAliasMappingNonEmpty = (this.getGroupByAliasToAggregateType() != null)
&& !this.getGroupByAliasToAggregateType()
.values()
.isEmpty();
return aggregateAliasMappingNonEmpty;
}
public Collection<AggregateOperator> getAggregates() {
return this.aggregates != null
? this.aggregates
: (this.aggregates = super.getCollection("aggregates", AggregateOperator.class));
}
public Collection<String> getOrderByExpressions() {
return this.orderByExpressions != null
? this.orderByExpressions
: (this.orderByExpressions = super.getCollection("orderByExpressions", String.class));
}
public boolean hasSelectValue() {
return super.has(HAS_SELECT_VALUE) && Boolean.TRUE.equals(super.getBoolean(HAS_SELECT_VALUE));
}
public boolean hasOffset() {
return this.getOffset() != null;
}
public boolean hasLimit() {
return this.getLimit() != null;
}
public Integer getLimit() {
return this.limit != null ? this.limit : (this.limit = super.getInt("limit"));
}
public Integer getOffset() {
return this.offset != null ? this.offset : (this.offset = super.getInt("offset"));
}
public boolean hasDistinct() {
return this.getDistinctQueryType() != DistinctQueryType.NONE;
}
public DistinctQueryType getDistinctQueryType() {
if (distinctQueryType != null) {
return distinctQueryType;
} else {
final String distinctType = super.getString("distinctType");
if (distinctType == null) {
return DistinctQueryType.NONE;
}
switch (distinctType) {
case "Ordered":
distinctQueryType = DistinctQueryType.ORDERED;
break;
case "Unordered":
distinctQueryType = DistinctQueryType.UNORDERED;
break;
default:
distinctQueryType = DistinctQueryType.NONE;
break;
}
return distinctQueryType;
}
}
public boolean hasGroupBy() {
final List<String> groupByExpressions = super.getList("groupByExpressions", String.class);
return groupByExpressions != null && !groupByExpressions.isEmpty();
}
public Map<String, AggregateOperator> getGroupByAliasToAggregateType(){
Map<String, AggregateOperator> groupByAliasToAggregateMap;
groupByAliasToAggregateMap = super.getMap("groupByAliasToAggregateType");
return groupByAliasToAggregateMap;
}
public List<String> getGroupByAliases() {
return super.getList("groupByAliases", String.class);
}
public boolean hasDCount() {
return this.getDCountInfo() != null;
}
public DCountInfo getDCountInfo() {
return this.dCountInfo != null ?
this.dCountInfo : (this.dCountInfo = super.getObject("dCountInfo", DCountInfo.class));
}
public String getDCountAlias() {
return this.dCountInfo.getDCountAlias();
}
public boolean isValueAggregate() {
return Strings.isNullOrEmpty(this.getDCountAlias());
}
public QueryPlanDiagnosticsContext getQueryPlanDiagnosticsContext() {
return queryPlanDiagnosticsContext;
}
public void setQueryPlanDiagnosticsContext(QueryPlanDiagnosticsContext queryPlanDiagnosticsContext) {
this.queryPlanDiagnosticsContext = queryPlanDiagnosticsContext;
}
public static final class QueryPlanDiagnosticsContext {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private volatile Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private volatile Instant endTimeUTC;
private volatile RequestTimeline requestTimeline;
public QueryPlanDiagnosticsContext(Instant startTimeUTC, Instant endTimeUTC) {
this.startTimeUTC = startTimeUTC;
this.endTimeUTC = endTimeUTC;
}
public QueryPlanDiagnosticsContext(Instant startTimeUTC, Instant endTimeUTC, RequestTimeline requestTimeline) {
this.startTimeUTC = startTimeUTC;
this.endTimeUTC = endTimeUTC;
this.requestTimeline = requestTimeline;
}
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
@JsonIgnore
public Duration getDuration() {
if (startTimeUTC == null ||
endTimeUTC == null ||
endTimeUTC.isBefore(startTimeUTC)) {
return null;
}
return Duration.between(startTimeUTC, endTimeUTC);
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
}
@Override
public boolean equals(Object o) {
return super.equals(o);
}
@Override
public int hashCode() {
return super.hashCode();
}
} | class QueryInfo extends JsonSerializable {
public static final QueryInfo EMPTY = new QueryInfo();
private static final String HAS_SELECT_VALUE = "hasSelectValue";
private Integer top;
private List<SortOrder> orderBy;
private Collection<AggregateOperator> aggregates;
private Collection<String> orderByExpressions;
private String rewrittenQuery;
private Integer offset;
private Integer limit;
private DistinctQueryType distinctQueryType;
private QueryPlanDiagnosticsContext queryPlanDiagnosticsContext;
private DCountInfo dCountInfo;
private boolean nonStreamingOrderBy;
public QueryInfo() {
}
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
public QueryInfo(ObjectNode objectNode) {
super(objectNode);
}
public QueryInfo(String jsonString) {
super(jsonString);
}
public Integer getTop() {
return this.top != null ? this.top : (this.top = super.getInt("top"));
}
public List<SortOrder> getOrderBy() {
return this.orderBy != null ? this.orderBy : (this.orderBy = super.getList("orderBy", SortOrder.class));
}
public String getRewrittenQuery() {
return this.rewrittenQuery != null ? this.rewrittenQuery
: (this.rewrittenQuery = super.getString("rewrittenQuery"));
}
public boolean hasTop() {
return this.getTop() != null;
}
public boolean hasOrderBy() {
Collection<SortOrder> orderBy = this.getOrderBy();
return orderBy != null && orderBy.size() > 0;
}
public boolean hasRewrittenQuery() {
return !StringUtils.isEmpty(this.getRewrittenQuery());
}
public boolean hasAggregates() {
Collection<AggregateOperator> aggregates = this.getAggregates();
boolean hasAggregates = aggregates != null && aggregates.size() > 0;
if (hasAggregates) {
return hasAggregates;
}
boolean aggregateAliasMappingNonEmpty = (this.getGroupByAliasToAggregateType() != null)
&& !this.getGroupByAliasToAggregateType()
.values()
.isEmpty();
return aggregateAliasMappingNonEmpty;
}
public Collection<AggregateOperator> getAggregates() {
return this.aggregates != null
? this.aggregates
: (this.aggregates = super.getCollection("aggregates", AggregateOperator.class));
}
public Collection<String> getOrderByExpressions() {
return this.orderByExpressions != null
? this.orderByExpressions
: (this.orderByExpressions = super.getCollection("orderByExpressions", String.class));
}
public boolean hasSelectValue() {
return super.has(HAS_SELECT_VALUE) && Boolean.TRUE.equals(super.getBoolean(HAS_SELECT_VALUE));
}
public boolean hasOffset() {
return this.getOffset() != null;
}
public boolean hasLimit() {
return this.getLimit() != null;
}
public Integer getLimit() {
return this.limit != null ? this.limit : (this.limit = super.getInt("limit"));
}
public Integer getOffset() {
return this.offset != null ? this.offset : (this.offset = super.getInt("offset"));
}
public boolean hasDistinct() {
return this.getDistinctQueryType() != DistinctQueryType.NONE;
}
public DistinctQueryType getDistinctQueryType() {
if (distinctQueryType != null) {
return distinctQueryType;
} else {
final String distinctType = super.getString("distinctType");
if (distinctType == null) {
return DistinctQueryType.NONE;
}
switch (distinctType) {
case "Ordered":
distinctQueryType = DistinctQueryType.ORDERED;
break;
case "Unordered":
distinctQueryType = DistinctQueryType.UNORDERED;
break;
default:
distinctQueryType = DistinctQueryType.NONE;
break;
}
return distinctQueryType;
}
}
public boolean hasGroupBy() {
final List<String> groupByExpressions = super.getList("groupByExpressions", String.class);
return groupByExpressions != null && !groupByExpressions.isEmpty();
}
public Map<String, AggregateOperator> getGroupByAliasToAggregateType(){
Map<String, AggregateOperator> groupByAliasToAggregateMap;
groupByAliasToAggregateMap = super.getMap("groupByAliasToAggregateType");
return groupByAliasToAggregateMap;
}
public List<String> getGroupByAliases() {
return super.getList("groupByAliases", String.class);
}
public boolean hasDCount() {
return this.getDCountInfo() != null;
}
public DCountInfo getDCountInfo() {
return this.dCountInfo != null ?
this.dCountInfo : (this.dCountInfo = super.getObject("dCountInfo", DCountInfo.class));
}
public String getDCountAlias() {
return this.dCountInfo.getDCountAlias();
}
public boolean isValueAggregate() {
return Strings.isNullOrEmpty(this.getDCountAlias());
}
public QueryPlanDiagnosticsContext getQueryPlanDiagnosticsContext() {
return queryPlanDiagnosticsContext;
}
public void setQueryPlanDiagnosticsContext(QueryPlanDiagnosticsContext queryPlanDiagnosticsContext) {
this.queryPlanDiagnosticsContext = queryPlanDiagnosticsContext;
}
public static final class QueryPlanDiagnosticsContext {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private volatile Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private volatile Instant endTimeUTC;
private volatile RequestTimeline requestTimeline;
public QueryPlanDiagnosticsContext(Instant startTimeUTC, Instant endTimeUTC) {
this.startTimeUTC = startTimeUTC;
this.endTimeUTC = endTimeUTC;
}
public QueryPlanDiagnosticsContext(Instant startTimeUTC, Instant endTimeUTC, RequestTimeline requestTimeline) {
this.startTimeUTC = startTimeUTC;
this.endTimeUTC = endTimeUTC;
this.requestTimeline = requestTimeline;
}
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
@JsonIgnore
public Duration getDuration() {
if (startTimeUTC == null ||
endTimeUTC == null ||
endTimeUTC.isBefore(startTimeUTC)) {
return null;
}
return Duration.between(startTimeUTC, endTimeUTC);
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
}
@Override
public boolean equals(Object o) {
return super.equals(o);
}
@Override
public int hashCode() {
return super.hashCode();
}
} |
As per the discussion with the BE team, the `QueryInfo` being passed by BE will have `hasNonStreamingOrderBy`. It would just be set to false for queries which are not non streaming order by queries. | public boolean hasNonStreamingOrderBy() {
this.nonStreamingOrderBy = Boolean.TRUE.equals(super.getBoolean("hasNonStreamingOrderBy"));
return this.nonStreamingOrderBy;
} | this.nonStreamingOrderBy = Boolean.TRUE.equals(super.getBoolean("hasNonStreamingOrderBy")); | public boolean hasNonStreamingOrderBy() {
this.nonStreamingOrderBy = Boolean.TRUE.equals(super.getBoolean("hasNonStreamingOrderBy"));
return this.nonStreamingOrderBy;
} | class QueryInfo extends JsonSerializable {
public static final QueryInfo EMPTY = new QueryInfo();
private static final String HAS_SELECT_VALUE = "hasSelectValue";
private Integer top;
private List<SortOrder> orderBy;
private Collection<AggregateOperator> aggregates;
private Collection<String> orderByExpressions;
private String rewrittenQuery;
private Integer offset;
private Integer limit;
private DistinctQueryType distinctQueryType;
private QueryPlanDiagnosticsContext queryPlanDiagnosticsContext;
private DCountInfo dCountInfo;
private boolean nonStreamingOrderBy;
public QueryInfo() {
}
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
public QueryInfo(ObjectNode objectNode) {
super(objectNode);
}
public QueryInfo(String jsonString) {
super(jsonString);
}
public Integer getTop() {
return this.top != null ? this.top : (this.top = super.getInt("top"));
}
public List<SortOrder> getOrderBy() {
return this.orderBy != null ? this.orderBy : (this.orderBy = super.getList("orderBy", SortOrder.class));
}
public String getRewrittenQuery() {
return this.rewrittenQuery != null ? this.rewrittenQuery
: (this.rewrittenQuery = super.getString("rewrittenQuery"));
}
public boolean hasTop() {
return this.getTop() != null;
}
public boolean hasOrderBy() {
Collection<SortOrder> orderBy = this.getOrderBy();
return orderBy != null && orderBy.size() > 0;
}
public boolean hasRewrittenQuery() {
return !StringUtils.isEmpty(this.getRewrittenQuery());
}
public boolean hasAggregates() {
Collection<AggregateOperator> aggregates = this.getAggregates();
boolean hasAggregates = aggregates != null && aggregates.size() > 0;
if (hasAggregates) {
return hasAggregates;
}
boolean aggregateAliasMappingNonEmpty = (this.getGroupByAliasToAggregateType() != null)
&& !this.getGroupByAliasToAggregateType()
.values()
.isEmpty();
return aggregateAliasMappingNonEmpty;
}
public Collection<AggregateOperator> getAggregates() {
return this.aggregates != null
? this.aggregates
: (this.aggregates = super.getCollection("aggregates", AggregateOperator.class));
}
public Collection<String> getOrderByExpressions() {
return this.orderByExpressions != null
? this.orderByExpressions
: (this.orderByExpressions = super.getCollection("orderByExpressions", String.class));
}
public boolean hasSelectValue() {
return super.has(HAS_SELECT_VALUE) && Boolean.TRUE.equals(super.getBoolean(HAS_SELECT_VALUE));
}
public boolean hasOffset() {
return this.getOffset() != null;
}
public boolean hasLimit() {
return this.getLimit() != null;
}
public Integer getLimit() {
return this.limit != null ? this.limit : (this.limit = super.getInt("limit"));
}
public Integer getOffset() {
return this.offset != null ? this.offset : (this.offset = super.getInt("offset"));
}
public boolean hasDistinct() {
return this.getDistinctQueryType() != DistinctQueryType.NONE;
}
public DistinctQueryType getDistinctQueryType() {
if (distinctQueryType != null) {
return distinctQueryType;
} else {
final String distinctType = super.getString("distinctType");
if (distinctType == null) {
return DistinctQueryType.NONE;
}
switch (distinctType) {
case "Ordered":
distinctQueryType = DistinctQueryType.ORDERED;
break;
case "Unordered":
distinctQueryType = DistinctQueryType.UNORDERED;
break;
default:
distinctQueryType = DistinctQueryType.NONE;
break;
}
return distinctQueryType;
}
}
public boolean hasGroupBy() {
final List<String> groupByExpressions = super.getList("groupByExpressions", String.class);
return groupByExpressions != null && !groupByExpressions.isEmpty();
}
public Map<String, AggregateOperator> getGroupByAliasToAggregateType(){
Map<String, AggregateOperator> groupByAliasToAggregateMap;
groupByAliasToAggregateMap = super.getMap("groupByAliasToAggregateType");
return groupByAliasToAggregateMap;
}
public List<String> getGroupByAliases() {
return super.getList("groupByAliases", String.class);
}
public boolean hasDCount() {
return this.getDCountInfo() != null;
}
public DCountInfo getDCountInfo() {
return this.dCountInfo != null ?
this.dCountInfo : (this.dCountInfo = super.getObject("dCountInfo", DCountInfo.class));
}
public String getDCountAlias() {
return this.dCountInfo.getDCountAlias();
}
public boolean isValueAggregate() {
return Strings.isNullOrEmpty(this.getDCountAlias());
}
public QueryPlanDiagnosticsContext getQueryPlanDiagnosticsContext() {
return queryPlanDiagnosticsContext;
}
public void setQueryPlanDiagnosticsContext(QueryPlanDiagnosticsContext queryPlanDiagnosticsContext) {
this.queryPlanDiagnosticsContext = queryPlanDiagnosticsContext;
}
public static final class QueryPlanDiagnosticsContext {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private volatile Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private volatile Instant endTimeUTC;
private volatile RequestTimeline requestTimeline;
public QueryPlanDiagnosticsContext(Instant startTimeUTC, Instant endTimeUTC) {
this.startTimeUTC = startTimeUTC;
this.endTimeUTC = endTimeUTC;
}
public QueryPlanDiagnosticsContext(Instant startTimeUTC, Instant endTimeUTC, RequestTimeline requestTimeline) {
this.startTimeUTC = startTimeUTC;
this.endTimeUTC = endTimeUTC;
this.requestTimeline = requestTimeline;
}
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
@JsonIgnore
public Duration getDuration() {
if (startTimeUTC == null ||
endTimeUTC == null ||
endTimeUTC.isBefore(startTimeUTC)) {
return null;
}
return Duration.between(startTimeUTC, endTimeUTC);
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
}
@Override
public boolean equals(Object o) {
return super.equals(o);
}
@Override
public int hashCode() {
return super.hashCode();
}
} | class QueryInfo extends JsonSerializable {
public static final QueryInfo EMPTY = new QueryInfo();
private static final String HAS_SELECT_VALUE = "hasSelectValue";
private Integer top;
private List<SortOrder> orderBy;
private Collection<AggregateOperator> aggregates;
private Collection<String> orderByExpressions;
private String rewrittenQuery;
private Integer offset;
private Integer limit;
private DistinctQueryType distinctQueryType;
private QueryPlanDiagnosticsContext queryPlanDiagnosticsContext;
private DCountInfo dCountInfo;
private boolean nonStreamingOrderBy;
public QueryInfo() {
}
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
public QueryInfo(ObjectNode objectNode) {
super(objectNode);
}
public QueryInfo(String jsonString) {
super(jsonString);
}
public Integer getTop() {
return this.top != null ? this.top : (this.top = super.getInt("top"));
}
public List<SortOrder> getOrderBy() {
return this.orderBy != null ? this.orderBy : (this.orderBy = super.getList("orderBy", SortOrder.class));
}
public String getRewrittenQuery() {
return this.rewrittenQuery != null ? this.rewrittenQuery
: (this.rewrittenQuery = super.getString("rewrittenQuery"));
}
public boolean hasTop() {
return this.getTop() != null;
}
public boolean hasOrderBy() {
Collection<SortOrder> orderBy = this.getOrderBy();
return orderBy != null && orderBy.size() > 0;
}
public boolean hasRewrittenQuery() {
return !StringUtils.isEmpty(this.getRewrittenQuery());
}
public boolean hasAggregates() {
Collection<AggregateOperator> aggregates = this.getAggregates();
boolean hasAggregates = aggregates != null && aggregates.size() > 0;
if (hasAggregates) {
return hasAggregates;
}
boolean aggregateAliasMappingNonEmpty = (this.getGroupByAliasToAggregateType() != null)
&& !this.getGroupByAliasToAggregateType()
.values()
.isEmpty();
return aggregateAliasMappingNonEmpty;
}
public Collection<AggregateOperator> getAggregates() {
return this.aggregates != null
? this.aggregates
: (this.aggregates = super.getCollection("aggregates", AggregateOperator.class));
}
public Collection<String> getOrderByExpressions() {
return this.orderByExpressions != null
? this.orderByExpressions
: (this.orderByExpressions = super.getCollection("orderByExpressions", String.class));
}
public boolean hasSelectValue() {
return super.has(HAS_SELECT_VALUE) && Boolean.TRUE.equals(super.getBoolean(HAS_SELECT_VALUE));
}
public boolean hasOffset() {
return this.getOffset() != null;
}
public boolean hasLimit() {
return this.getLimit() != null;
}
public Integer getLimit() {
return this.limit != null ? this.limit : (this.limit = super.getInt("limit"));
}
public Integer getOffset() {
return this.offset != null ? this.offset : (this.offset = super.getInt("offset"));
}
public boolean hasDistinct() {
return this.getDistinctQueryType() != DistinctQueryType.NONE;
}
public DistinctQueryType getDistinctQueryType() {
if (distinctQueryType != null) {
return distinctQueryType;
} else {
final String distinctType = super.getString("distinctType");
if (distinctType == null) {
return DistinctQueryType.NONE;
}
switch (distinctType) {
case "Ordered":
distinctQueryType = DistinctQueryType.ORDERED;
break;
case "Unordered":
distinctQueryType = DistinctQueryType.UNORDERED;
break;
default:
distinctQueryType = DistinctQueryType.NONE;
break;
}
return distinctQueryType;
}
}
public boolean hasGroupBy() {
final List<String> groupByExpressions = super.getList("groupByExpressions", String.class);
return groupByExpressions != null && !groupByExpressions.isEmpty();
}
public Map<String, AggregateOperator> getGroupByAliasToAggregateType(){
Map<String, AggregateOperator> groupByAliasToAggregateMap;
groupByAliasToAggregateMap = super.getMap("groupByAliasToAggregateType");
return groupByAliasToAggregateMap;
}
public List<String> getGroupByAliases() {
return super.getList("groupByAliases", String.class);
}
public boolean hasDCount() {
return this.getDCountInfo() != null;
}
public DCountInfo getDCountInfo() {
return this.dCountInfo != null ?
this.dCountInfo : (this.dCountInfo = super.getObject("dCountInfo", DCountInfo.class));
}
public String getDCountAlias() {
return this.dCountInfo.getDCountAlias();
}
public boolean isValueAggregate() {
return Strings.isNullOrEmpty(this.getDCountAlias());
}
public QueryPlanDiagnosticsContext getQueryPlanDiagnosticsContext() {
return queryPlanDiagnosticsContext;
}
public void setQueryPlanDiagnosticsContext(QueryPlanDiagnosticsContext queryPlanDiagnosticsContext) {
this.queryPlanDiagnosticsContext = queryPlanDiagnosticsContext;
}
public static final class QueryPlanDiagnosticsContext {
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private volatile Instant startTimeUTC;
@JsonSerialize(using = DiagnosticsInstantSerializer.class)
private volatile Instant endTimeUTC;
private volatile RequestTimeline requestTimeline;
public QueryPlanDiagnosticsContext(Instant startTimeUTC, Instant endTimeUTC) {
this.startTimeUTC = startTimeUTC;
this.endTimeUTC = endTimeUTC;
}
public QueryPlanDiagnosticsContext(Instant startTimeUTC, Instant endTimeUTC, RequestTimeline requestTimeline) {
this.startTimeUTC = startTimeUTC;
this.endTimeUTC = endTimeUTC;
this.requestTimeline = requestTimeline;
}
public Instant getStartTimeUTC() {
return startTimeUTC;
}
public Instant getEndTimeUTC() {
return endTimeUTC;
}
@JsonIgnore
public Duration getDuration() {
if (startTimeUTC == null ||
endTimeUTC == null ||
endTimeUTC.isBefore(startTimeUTC)) {
return null;
}
return Duration.between(startTimeUTC, endTimeUTC);
}
public RequestTimeline getRequestTimeline() {
return requestTimeline;
}
}
@Override
public boolean equals(Object o) {
return super.equals(o);
}
@Override
public int hashCode() {
return super.hashCode();
}
} |
Can we also add tests with container with multi-partitions? | public void before_NonStreamingOrderByQueryVectorSearchTest() {
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.directMode(DirectConnectionConfig.getDefaultConfig())
.consistencyLevel(ConsistencyLevel.SESSION)
.contentResponseOnWriteEnabled(true)
.buildAsyncClient();
database = createDatabase(client, databaseId);
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/pk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties containerProperties = new CosmosContainerProperties(flatContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(128L));
database.createContainer(containerProperties).block();
flatIndexContainer = database.getContainer(flatContainerId);
containerProperties = new CosmosContainerProperties(quantizedContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.QUANTIZED_FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(128L));
database.createContainer(containerProperties).block();
quantizedIndexContainer = database.getContainer(quantizedContainerId);
containerProperties = new CosmosContainerProperties(largeDataContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.QUANTIZED_FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(2L));
database.createContainer(containerProperties).block();
largeDataContainer = database.getContainer(largeDataContainerId);
for (Document doc: getVectorDocs()) {
flatIndexContainer.createItem(doc).block();
quantizedIndexContainer.createItem(doc).block();
}
} | database.createContainer(containerProperties).block(); | public void before_NonStreamingOrderByQueryVectorSearchTest() {
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.directMode(DirectConnectionConfig.getDefaultConfig())
.consistencyLevel(ConsistencyLevel.SESSION)
.contentResponseOnWriteEnabled(true)
.buildAsyncClient();
database = createDatabase(client, databaseId);
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/pk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties containerProperties = new CosmosContainerProperties(flatContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(128L));
database.createContainer(containerProperties).block();
flatIndexContainer = database.getContainer(flatContainerId);
containerProperties = new CosmosContainerProperties(quantizedContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.QUANTIZED_FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(128L));
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(20000)).block();
quantizedIndexContainer = database.getContainer(quantizedContainerId);
containerProperties = new CosmosContainerProperties(largeDataContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.QUANTIZED_FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(2L));
database.createContainer(containerProperties).block();
largeDataContainer = database.getContainer(largeDataContainerId);
for (Document doc : getVectorDocs()) {
flatIndexContainer.createItem(doc).block();
quantizedIndexContainer.createItem(doc).block();
}
} | class NonStreamingOrderByQueryVectorSearchTest {
protected static final int TIMEOUT = 30000;
protected static final int SETUP_TIMEOUT = 20000;
protected static final int SHUTDOWN_TIMEOUT = 20000;
protected static Logger logger = LoggerFactory.getLogger(NonStreamingOrderByQueryVectorSearchTest.class.getSimpleName());
private final String databaseId = CosmosDatabaseForTest.generateId();
private final String flatContainerId = "flat_" + UUID.randomUUID();
private final String quantizedContainerId = "quantized_" + UUID.randomUUID();
private final String largeDataContainerId = "large_data_" + UUID.randomUUID();
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer flatIndexContainer;
private CosmosAsyncContainer quantizedIndexContainer;
private CosmosAsyncContainer largeDataContainer;
@BeforeClass(groups = {"query"}, timeOut = SETUP_TIMEOUT)
@AfterClass(groups = {"query"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeDeleteDatabase(database);
safeClose(client);
}
@Test(groups = {"query"}, timeOut = TIMEOUT)
public void flatIndexVectorSearch() {
String queryVector = getQueryVector();
String vanilla_query = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
List<Document> resultDocs = flatIndexContainer.queryItems(vanilla_query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
String euclideanSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'euclidean'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'})", queryVector, queryVector);
resultDocs = flatIndexContainer.queryItems(euclideanSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
String dotproductSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'dotproduct'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'dotproduct'})", queryVector, queryVector);
resultDocs = flatIndexContainer.queryItems(dotproductSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
}
@Test(groups = {"query"}, timeOut = TIMEOUT)
public void quantizedIndexVectorSearch() {
String queryVector = getQueryVector();
String vanillaQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
List<Document> resultDocs = quantizedIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
String euclideanSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'})", queryVector, queryVector);
resultDocs = quantizedIndexContainer.queryItems(euclideanSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, true);
String dotproduct_specs_query = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'dotproduct'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'dotproduct'})", queryVector, queryVector);
resultDocs = quantizedIndexContainer.queryItems(dotproduct_specs_query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
}
@Test(groups = {"query"}, timeOut = TIMEOUT*1000)
public void largeDataVectorSearch() {
double embeddingValue = 0.0001;
for (int i=1;i<=2000;i++) {
Document doc = new Document(String.valueOf(i), String.valueOf(i%2), "text" + i, new double[] {embeddingValue, embeddingValue});
largeDataContainer.createItem(doc).block();
embeddingValue = 0.0001 * (i+1);
}
String query = String.format("SELECT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 0 LIMIT 1000");
List<Document> resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(1000, resultDocs, false);
query = String.format("SELECT DISTINCT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 0 LIMIT 1000");
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(1000, resultDocs, false);
query = String.format("SELECT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 1000 LIMIT 500");
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(500, resultDocs, false);
query = String.format("SELECT DISTINCT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 1000 LIMIT 500");
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(500, resultDocs, false);
query = String.format("SELECT TOP 1000 c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001])");
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(1000, resultDocs, false);
query = String.format("SELECT DISTINCT TOP 1000 c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001])");
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(1000, resultDocs, false);
}
private void validateOrdering(int top, List<Document> docs, boolean isEucledian) {
assertThat(docs.size()).isEqualTo(top);
if (isEucledian) {
for (int i=0;i<docs.size()-1;i++) {
assertThat(docs.get(i).getScore()).isLessThanOrEqualTo(docs.get(i+1).getScore());
}
} else {
for (int i=0;i<docs.size()-1;i++) {
assertThat(docs.get(i).getScore()).isGreaterThanOrEqualTo(docs.get(i+1).getScore());
}
}
}
private CosmosVectorEmbeddingPolicy populateVectorEmbeddingPolicy(Long dimensions) {
CosmosVectorEmbeddingPolicy policy = new CosmosVectorEmbeddingPolicy();
CosmosVectorEmbedding embedding = new CosmosVectorEmbedding();
embedding.setPath("/embedding");
embedding.setDataType(CosmosVectorDataType.FLOAT32);
embedding.setDimensions(dimensions);
embedding.setDistanceFunction(CosmosVectorDistanceFunction.COSINE);
policy.setCosmosVectorEmbeddings(Collections.singletonList(embedding));
return policy;
}
private IndexingPolicy populateIndexingPolicy(CosmosVectorIndexType vectorIndexType) {
IndexingPolicy indexingPolicy = new IndexingPolicy();
indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT);
IncludedPath includedPath1 = new IncludedPath("/*");
indexingPolicy.setIncludedPaths(Collections.singletonList(includedPath1));
CosmosVectorIndexSpec cosmosVectorIndexSpec = new CosmosVectorIndexSpec();
cosmosVectorIndexSpec.setPath("/embedding");
cosmosVectorIndexSpec.setType(vectorIndexType.toString());
indexingPolicy.setVectorIndexes(Collections.singletonList(cosmosVectorIndexSpec));
return indexingPolicy;
}
public List<Document> getVectorDocs() {
List<Document> docs = new ArrayList<>();
docs.add(new Document("item0", "1", "Good morning!", new double[] {-0.008334724, -0.05993167, -0.0903545, -0.04791922, -0.01825805, -0.053011455, 0.120733805, 0.017714009, 0.07346743, 0.11559805, 0.03262076, 0.074512, 0.015864266, 0.01981401, 0.007850527, 0.076296456, -0.08539284, 0.016593281, -0.05423011, 0.07520837, 0.074250855, 0.056754466, -0.022098986, 0.03155444, 0.04334927, 0.024655985, -0.02109795, 0.044023883, -0.027550288, -0.11350893, -0.022806242, 0.08608921, 0.009221513, 0.06659074, 0.09087678, 0.024830079, 0.0075513036, 0.036472578, 0.015418151, 0.060497474, 0.010940685, -0.059365865, 0.043566886, 0.00427073, -0.023546139, 0.030357545, -0.03403527, 0.1182965, 0.1115939, -0.018954424, 0.0032452107, 0.10297628, 0.15328929, -0.016952349, -0.04530782, 0.04674409, -8.351895e-05, -0.19376601, -0.025091218, -0.03664667, -0.011860116, -0.10454312, -0.13109237, -0.049268447, 0.17557324, 0.044872586, 0.046787616, 0.15337633, -0.019509347, 0.0077743605, 0.04556896, -0.08413066, -0.028681897, 0.1209079, 0.1357929, -0.09314, 0.12534729, -0.065546185, 0.12212656, 0.04892026, 0.07394619, -0.08134516, -0.004493787, 0.08138869, 0.028573086, 0.12290998, -0.16477945, -0.29839617, -0.08090993, 0.12256179, 0.16591106, -0.08173688, -0.034383457, -0.1076768, -0.043022845, -0.07655759, 0.2021225, 0.03923631, 0.07703635, -0.08587159, 0.06498038, -0.08330371, 0.16486649, -0.14040637, 0.02070624, -0.069855, 0.052880887, 0.016136287, 0.00024294876, -0.19968519, 0.06933272, 0.013241983, 0.0004002109, 0.14998151, 0.07516485, 0.18610589, -0.07895138, -0.108982496, -0.03494926, -0.027637335, -0.032925423, -0.009509855, 0.1182965, -0.075513035, -0.08665501, 0.019629037, 0.2583547, 0.00983084}));
docs.add(new Document("item1", "2", "Good afternoon!", new double[] {-0.04136167, -0.047741424, -0.08737179, -3.0502639e-05, 0.054595508, -0.11298566, 0.0906921, 0.108052626, 0.04729081, 0.21287979, -0.06588459, -0.052793052, -0.05568647, 0.017384235, -0.02518698, 0.021665072, -0.19238868, 0.03379609, 0.0075359354, -0.04989963, 0.055449303, 0.03282371, 0.026064493, 0.042096883, -0.007192045, 0.0786441, -0.09012291, 0.094012424, -0.0031483725, -0.0318039, -0.034721036, 0.10255038, 0.08851018, 0.11327027, 0.031614166, 0.006035863, 0.021321183, 0.0703433, 0.14201473, 0.058674756, -0.007986549, 0.03329804, 0.08884221, -0.09505595, -0.015522485, -0.068161376, 0.072572656, 0.049235567, 0.2263508, 0.029574543, -0.050563693, 0.050516263, 0.04660303, 0.08376687, 0.017514676, 0.0706279, 0.007921329, -0.1353741, -0.054358345, -0.1628853, -0.097617336, -0.123326086, -0.0489984, -0.0693472, 0.15396787, 0.027368903, 0.06042978, 0.22862759, -0.016293272, 0.033582643, 0.06697555, -0.10302471, -0.12104929, 0.18034068, 0.068303674, 0.003468546, 0.07480201, -0.1204801, 0.06787678, -0.042926963, 0.15785739, -0.034531303, -0.028934196, 0.022933908, -0.012012435, 0.004248228, -0.19172463, -0.31837103, -0.099230066, 0.02071641, 0.18546346, 0.06299117, -0.047053643, -0.09206767, 0.049472734, -0.046365865, 0.18214314, 0.0051079527, 0.105680965, -0.026443958, 0.072714955, -0.08073115, 0.09429702, -0.113744594, 0.02092986, -0.013625161, -0.07390078, 0.020550394, 0.011787128, -0.13499463, 0.015107445, -0.0015223064, 0.040104695, 0.094961084, 0.13404597, 0.082486175, -0.07428025, -0.026847139, -0.025163265, -0.09818654, 0.010043961, 0.013601444, 0.077932596, 0.016056107, -0.026965722, 0.045606934, 0.085047565, -0.005878741}));
docs.add(new Document("item2", "1", "Great weather today!", new double[] {0.07557184, -0.047335204, -0.054325826, 0.011354052, 0.07698824, -0.025358144, -0.07927276, 0.1510523, 0.022251202, 0.026249107, -0.021725763, 0.062047496, 0.14556946, -0.0006071819, -0.107098185, 0.021394506, -0.13771072, -0.10527057, 0.115048304, -0.048842985, -0.059443146, 0.060585406, -0.014940745, 0.18075103, 0.053777542, -0.022057017, 0.08493836, 0.1198001, 0.041555375, -0.066525154, 0.054280136, 0.107646465, 0.023359193, -0.0046632714, -0.1017981, 0.04105278, 0.11376897, 0.016437104, 0.067393266, 0.0961325, -0.017727856, -0.03333111, -0.020492123, -0.041943744, 0.0031040881, -0.028053876, 0.053640474, -0.02215982, 0.08205987, 0.015100661, -0.17133881, -0.05843796, 0.0895074, -0.04064157, -0.10225501, 0.11367759, -0.03593546, -0.010999952, -0.14273666, -0.010383132, 0.026112035, -0.14200561, -0.0634639, -0.04029889, 0.07383561, 0.116784535, 0.0805064, 0.13241065, -0.100884296, -0.10216363, -0.030384084, -0.07657703, -0.06323545, -0.013101708, 0.15233164, -0.009880538, 0.16448526, -0.11148446, -0.05606206, -0.11587073, 0.084892675, -0.1397211, -0.04948265, 0.006470896, 0.015637523, 0.09051259, -0.12665366, -0.28583884, -0.16046451, 0.055833608, 0.100244634, 0.013535767, -0.07155109, -0.10051877, 0.08662891, -0.0729218, 0.15818, 0.025449526, 0.05053353, -0.015740326, -0.0767141, -0.056381892, 0.091517776, -0.075114936, -0.04621579, -0.040458806, 0.03947646, 0.10901718, 0.0076417113, -0.34487078, 0.024375802, -0.072419204, 0.053000808, 0.04057303, -0.08434439, 0.027185759, 0.03403931, -0.06332683, 0.051858548, 0.011993717, 0.10728095, -0.09887392, 0.07593736, -0.0025172527, 0.1089258, -0.048934367, 0.051264573, 0.017008234}));
docs.add(new Document("item3", "2", "Hope you're doing well.", new double[] {-0.016385807, 0.16914073, -0.062722616, 0.009645036, 0.06367876, -0.10871283, -0.034588274, 0.0038275379, 0.107661076, 0.069654614, 0.036404934, -0.037910853, 0.08872956, 0.14571536, -0.082227826, 0.034731694, -0.07405285, 0.050340638, -0.16569862, -0.20671692, -0.0834708, 0.0043175584, -0.017234378, 0.052683175, 0.013863994, 0.12142946, -0.002630872, 0.069702424, 0.095804974, -0.10020321, 0.0793116, 0.028970966, 0.13940485, 0.15814514, 0.11597948, -0.03795866, -0.018178564, 0.14753202, -0.10670494, -0.055455975, 0.058037546, -0.04457991, -0.0046014115, -0.018704439, 0.07902476, -0.07902476, 0.031026661, -0.017855868, 0.098769, 0.118847884, -0.051488005, 0.009017572, 0.15068726, 0.044962365, 0.18233542, -0.0006853563, 0.11205931, -0.2602606, 0.09848216, 0.08681728, -0.077638365, -0.008664995, 0.010314333, -0.0361659, 0.047185384, 0.09202823, 0.004359389, -0.008545479, -0.08815587, 0.0765388, 0.012262463, 0.0542608, -0.10813915, 0.11722245, 0.013744476, -0.08265808, 0.008055458, 0.12085578, 0.056364305, -0.12907855, -0.05311344, -0.060666922, 0.117031224, 0.029233903, -0.148775, 0.017879771, -0.081558526, -0.26332027, -0.22029407, -0.07410065, 0.0059340284, -0.11234615, -0.06898532, 0.046516087, 0.06798138, -0.049193274, -0.04687464, -0.049002044, -0.03145692, 0.0065614935, -0.021274058, -0.060188852, -0.04584679, -0.015979448, 0.08949447, -0.050197218, -0.051440194, -0.1341461, 0.08557431, -0.08261028, -0.104314595, -0.016134819, 0.057320442, -0.022421423, 0.012501498, 0.055503782, 0.020568907, -0.095183484, 0.0049450235, -0.03525757, 0.17688543, -0.06888971, -0.005694994, 0.05622088, -0.04250031, 0.050053798, 0.16063109, 0.06353533}));
docs.add(new Document("item4", "1", "Excuse me please.", new double[] {-0.19188246, 0.018719073, -0.032395326, 0.09734556, 0.021671357, -0.11751684, -0.078514785, 0.16507255, -0.0012956136, 0.117006175, -0.065492816, 0.106282204, -0.009750514, -0.006008296, 0.021799022, 0.04643862, -0.046023704, -0.023442727, 0.12868765, -0.1466886, -0.085089594, -0.046885453, -0.0067742937, 0.048162118, -0.04739612, 0.021687314, -0.025581138, 0.04841745, -0.10519704, -0.039129723, -0.09747323, 0.10532471, 0.04375763, 0.09536674, 0.0145938555, -0.0060681393, 0.26171595, 0.1815415, -0.03833181, 0.012487361, -0.027192924, -0.12281499, 0.017937116, -0.02173519, 0.07308897, -0.06913131, 0.07417413, -0.01884674, 0.049023863, -0.049949445, 0.081068106, 0.22060739, -0.031645287, -0.024735348, -0.041108552, 0.1823075, -0.06230116, -0.119048834, -0.07813178, -0.0841321, -0.007711843, 0.039576557, -0.07589762, 0.028198296, 0.003087929, 0.047970615, 0.0845151, 0.08208944, 0.07423796, 0.01259907, 0.00046179298, 0.024671515, 0.10302671, 0.12160216, 0.1353263, -0.16251922, 0.069195144, -0.09160058, 0.033320908, -0.06341824, -0.06402466, -0.048864283, -0.10053722, -0.019341446, 0.027033342, -0.19354212, -0.011146865, -0.31329313, 0.054513514, -0.0098861605, 0.10277138, 0.059237167, 0.021495815, -0.0704718, 0.14285861, 0.042672466, 0.057769008, 0.054353934, -0.041363884, 0.07819562, 0.1085802, -0.0047874865, 0.0035626881, 0.025405597, 0.0032953867, 0.13430496, -0.084451266, -0.10883553, 0.115601845, -0.072259136, -0.06976964, -0.1081972, 0.08515343, 0.044715125, 0.05725834, -0.06759931, -0.0421618, -0.06185433, -0.068939805, -0.13673063, -0.032874074, -0.121538326, -0.010157451, -0.048608948, 0.049949445, 0.031310163, 0.13238996, 0.06855681}));
docs.add(new Document("item5", "2", "Is this the right place?", new double[] {-0.05695127, 0.07729321, -0.07253956, 0.054049686, -0.084886715, -0.1677979, -0.020681491, -0.010765179, -0.05312365, 0.10964277, -0.1724898, -0.0139754405, -0.019446775, -0.009877727, 0.10902541, 0.06599557, -0.20224646, -0.008658445, -0.11698933, -0.00034678154, 0.059760246, 0.023660243, 0.014523345, 0.058340326, -0.116927594, -0.0011546522, 0.035991967, 0.017857078, -0.21261807, -0.07568809, -0.007250097, 0.09525833, 0.073033445, -0.078157514, -0.14816591, -0.089578636, -0.006030815, 0.08519539, 0.059852853, 0.12328638, 0.08544234, -0.017656436, -0.03901702, 0.036238912, -0.09482618, 0.007215371, 0.15742627, 0.014183799, 0.107914165, 0.014245534, -0.011907292, 0.025188204, 0.057630364, -0.057321683, 0.0024366346, 0.034695517, 0.11766842, -0.16520499, 0.065193, 0.10822285, -0.06834152, -0.048925616, -0.078836605, 0.05161112, 0.07235435, 0.07636718, -0.075996764, 0.13902901, 0.023860885, 0.07846619, 0.02665443, -0.026870504, -0.0084115015, 0.07550287, 0.07500899, -0.07395948, 0.05062335, 0.05621044, -0.031531557, -0.001280053, 0.06908235, 0.078280985, -0.060068928, 0.14236274, 0.14236274, 0.06358787, -0.042474225, -0.38053942, 0.062785305, 0.050870296, 0.038677476, -0.078157514, 0.029309068, -0.07809578, 0.07439163, -0.06772417, 0.11896487, 0.073589064, 0.05238282, 0.018196626, 0.116371974, 0.0033067234, -0.020264775, -0.006304768, -0.10686466, 0.08408415, -0.04386328, -0.0068681072, 0.08994905, -0.059513304, 0.021823604, 0.049419504, -0.012817894, -0.093221046, 0.02802805, 0.089578636, 0.07124311, -0.26694557, 0.024833223, -0.03802925, -0.18483697, -0.1103836, 0.11877967, 0.07000839, -0.018860284, -0.044974525, -0.034016423, 0.022780508}));
docs.add(new Document("item6", "1", "Awful weather today.", new double[] {0.002305239, -0.02629875, 0.009952777, 0.026884208, -0.0067561795, -0.1118458, 0.07432968, 0.0999493, 0.02177902, -0.047726493, 0.042059265, 0.1583077, 0.11175212, 0.07685886, -0.14060347, 0.005084698, -0.19277944, -0.05606341, 0.082619764, -0.04241054, 0.09620237, 0.011832096, -0.023301208, 0.28214368, 0.02451896, 0.02793803, 0.03358184, 0.044424515, 0.11006601, 0.038101573, 0.0077982936, 0.045572013, 0.075922124, 0.016369391, -0.0039986745, 0.22781321, 0.062854715, 0.048054352, 0.010251361, 0.07170683, -0.018816603, 0.027329156, -0.08482108, -0.079481706, 0.042785235, 0.024120849, 0.17413847, 0.035431888, 0.12439801, 0.10641275, -0.14828467, -0.024425287, 0.083415985, 0.1184966, 0.0026799317, 0.15399873, -0.010304051, 0.009742012, -0.10781785, -0.019472316, 0.061777476, -0.09798217, 0.028804509, -0.023371464, 0.015491205, 0.07521958, 0.024003757, 0.013465522, -0.089692086, -0.097794816, 0.021193562, -0.0592483, -0.056438103, -0.10987866, 0.11802823, -0.06440032, 0.07704621, -0.040138967, -0.13891736, -0.16027485, 0.08631986, -0.16786237, -0.085757814, 0.015491205, -0.013243048, 0.09133137, -0.16196096, -0.2313728, -0.083134964, 0.12308659, 0.07559427, 0.09723278, 0.048663225, -0.13339064, 0.016345974, -0.13189186, 0.11025336, 0.03346475, -0.00688498, -0.059435643, 0.0056877197, 0.014999421, 0.063650936, -0.15053283, -0.017481761, 0.05194179, 0.061402783, 0.0077924393, 0.19971126, -0.07713988, 0.06425981, 0.0021369199, 0.12158781, -0.024331613, -0.008571098, 0.03264511, -0.0020242194, -0.05507984, -0.00805004, -0.03304322, 0.050630365, -0.1475353, -0.003735219, -0.0202217, 0.16271034, -0.059435643, 0.06023187, 0.06660164}));
docs.add(new Document("item7", "2", "Dinosaurs were huge.", new double[] {0.09842033, -0.025395654, -0.03831241, 0.037825905, 0.17008278, -0.022269849, -0.05935383, 0.032668933, -0.022148222, 0.08178179, -0.062710725, 0.164342, -0.024021273, -0.052640036, 0.027366007, 0.18098053, -0.18487258, -0.008903074, 0.16346629, 0.009407825, 0.110339746, -0.046315446, 0.08046822, 0.12512955, -0.06635953, 0.0070239417, 0.055753678, -0.0005336371, -0.012326866, 0.1578228, -0.023668556, -0.035904203, 0.05988899, -0.08032227, -0.11520481, 0.08499274, 0.05093726, -0.09248494, -0.011128843, -0.07064079, 0.01466818, 0.082073696, -0.040574666, -0.07292737, 0.03539337, 0.05025615, -0.1145237, 0.09584184, 0.04186391, -0.034833886, -0.18234275, -0.090441614, -0.09194978, -0.031331036, -0.13106494, 0.068208255, 0.03220675, -0.031233737, -0.04337208, 0.1289243, 0.1363192, 0.052494083, 0.03074723, -0.000113359885, 0.07681943, 0.03962598, 0.016529068, -0.04191256, -0.03612313, 0.023084749, 0.10917213, 0.09477153, -0.09652295, -0.0999285, 0.11685894, -0.012649177, 0.043688312, -0.10333405, -0.060521446, -0.042034186, 0.0483831, 0.0028141378, -0.17270991, 0.05424551, 0.25570798, 0.09511208, -0.08504139, -0.1570444, -0.084262975, -0.13291366, -0.023741532, -0.14857918, 0.09190113, -0.08041958, -0.019837314, 0.09569589, -0.053029243, -0.030722905, -0.05239678, -0.15091442, -0.05872137, -0.056045584, 0.11831845, -0.1145237, -0.08761988, -0.0035727844, -0.05570503, 0.19285129, 0.011761302, 0.087717175, 0.107712604, -0.11277228, -0.042423394, 0.13048112, -0.03356897, 0.056775343, 0.08640361, -0.11831845, -0.10868562, 0.0410855, 0.12036178, -0.09477153, -0.017611546, 0.0075043673, -0.12668636, -0.006391483, 0.0012185475, -0.05161837}));
docs.add(new Document("item8", "1", "The hero saves the day again.", new double[] {0.11208976, 0.058739875, -0.017022463, 0.15080968, -0.0031057745, -0.048069898, -0.059069872, 0.09861479, 0.11626975, 0.12935972, 0.008882481, 0.08766981, -0.00940498, -0.077164836, 0.0015795279, 0.06297486, -0.06313986, 0.0931698, -0.17247963, -0.12077974, -0.044797402, -0.1404697, -0.050077394, 0.020432455, 0.07897983, 0.022632452, -0.046914898, 0.0031212433, -0.059399873, 0.03321993, -0.07237984, 0.10119978, 0.19612958, -0.086349815, 0.038252417, 0.084094815, 0.16257966, 0.15520966, 0.063304864, -0.08761481, 0.06132487, -0.08486482, -0.09789979, 0.06935485, -0.04160741, 0.05585238, -0.13485971, 0.105544776, -0.08084983, 0.103344776, 0.053817384, -0.0923998, -0.052607387, 0.015537467, 0.025299946, -0.05961987, 0.08090483, -0.25453946, 0.01894746, -0.026344944, -0.14552969, 0.014341219, 0.14409968, 0.12264974, -0.0456224, -0.13529971, -0.0466674, 0.006166862, 0.02776119, -0.020184956, 0.093939796, 0.054779883, -0.09635979, -0.016334964, -0.029177437, -0.06863985, 0.19139959, -0.08794481, -0.08464482, -0.013413096, 0.022109952, -0.122099735, -0.0458149, -0.019676207, 0.10785477, -0.109119765, -0.09690979, -0.28423938, 0.050709892, 0.12803972, 0.10620477, 0.12110974, -0.006658423, -0.052304886, -0.07798983, -0.035667423, -0.07507484, 0.02745869, 0.07237984, -0.0230862, -0.03684992, -0.067539856, -0.052387387, -0.05202989, 0.14244969, 0.080684826, -0.038472418, 0.112639755, 0.03242243, -0.07501984, 0.10631477, -0.024076197, -0.07754983, 0.06610986, -0.12671973, -0.044082403, 0.006001862, 0.037454918, 0.054504883, -0.03679492, 0.076669835, 0.02271495, 0.14794968, 0.06440486, -0.006850923, -0.06984985, 0.035639923, -0.009143731}));
docs.add(new Document("item9", "2", "Don't worry about it.", new double[] {-0.051598575, 0.22204931, -0.017881807, 0.11678282, 0.18426134, -0.03713568, -0.016847137, 0.06549915, 0.057626653, 0.032569632, 0.00076827104, -0.04489571, -0.07530603, 0.10778569, 0.030477798, 0.050338972, -0.21053298, -0.04341118, -0.097708896, -0.13432723, 0.1438642, 0.059606023, -0.12299085, -0.036820777, -0.026699001, 0.18381149, -0.02861089, 0.08259371, -0.14962237, -0.07373153, 0.02321261, 0.085607745, -0.13810603, 0.065139264, -0.12685962, 0.14098512, 0.17112552, -0.035921063, -0.008536032, -0.09680918, 0.04435588, -0.16086878, -0.035718627, 0.09689915, -0.0007956842, 0.034256592, -0.00234769, 0.04577293, 0.06725359, 0.015958669, -0.06486935, 0.124160476, 0.09887852, -0.050518915, -0.07080746, -0.078859895, 0.17013584, -0.22078972, -0.10103783, 0.06873812, 0.0370682, 0.04563797, -0.060235824, -0.056816913, -0.064689405, 0.11273411, 0.16572724, 0.108415484, 0.07921978, 0.05569227, -0.11210431, -0.05848138, -0.008361713, 0.07458626, -0.08992637, -0.07557594, -0.0020102975, -0.07080746, -0.0092614265, 0.06626391, -0.05848138, -0.078545, 0.08362838, -0.031737395, -0.047549862, -0.15367107, 0.093930095, -0.087182246, 0.06401462, 0.09006133, 0.10886534, 0.013338254, 0.025551865, -0.027553728, 0.14206477, -0.09060115, -0.07818511, -0.001209693, 0.017893054, -0.069367915, 0.0709874, 0.050249003, -0.13396735, -0.056681953, -0.022166694, -0.02170559, 0.08277365, -0.101667635, 0.09096104, 0.049529232, -0.095819496, -0.08974643, 0.054477658, -0.037967913, -0.08682236, 0.077690266, 0.03828281, -0.04136433, -0.14431405, -0.060505737, 0.025619343, -0.019400073, 0.11075474, 0.066893704, 0.07494614, 0.03684327, 0.03929499, -0.017353225}));
return docs;
}
// Returns the query embedding as a comma-separated literal that the tests splice
// into SQL text via String.format; presumably the same dimensionality as the
// stored document embeddings — TODO confirm against getVectorDocs().
public String getQueryVector() {
return "0.13481648, 0.022579897, -0.038054377, 0.035029914, 0.15404047, -0.012947189, 0.013434003, 0.0328755,0.0859279, 0.090071, 0.07391291, 0.10896354, 0.04085097, 0.019876525, 0.013806882, 0.03799223,-0.097528584, -0.10324606, -0.07863604, -0.01846787, -0.0018139011, 0.09686569, 0.0065512774,0.014107257, 0.0004389097, 0.07432722, 0.012698603, 0.09404838, 0.11592395, -0.08290344, -0.023802113,0.0771031, 0.15578057, 0.17152436, 0.06732538, 0.04408259, 0.04163816, 0.03196402, 0.08451925,0.05365315, -0.10473758, 0.054730356, -0.0686926, 0.12412729, 0.1910798, -0.048971448, -0.007649199,-0.059122045, -0.005241022, 0.021440545, -0.1014231, 0.08282058, -0.057671957, -0.024755025, -0.07619162,0.0966171, 0.047272775, -0.20384054, -0.024319999, -0.014739079, 0.03681145, -0.03331053, 0.09172824,-0.09744572, 0.08584504, 0.13440217, -0.019234344, 0.19605151, 0.030389642, -0.0646738, 0.08447782,-0.107969195, -0.19058262, -0.05369458, 0.071675636, -0.095871344, 0.030141056, 0.025107188, 0.06309942,-0.09951727, 0.031446133, -0.07888463, -0.08182623, -0.063762315, -0.0955399, -0.03948375, -0.031073254,-0.29979473, -0.06711823, 0.07743455, 0.0074161496, 0.01248109, -0.0812462, -0.0676154, 3.0668652e-05,-0.17931339, 0.2008575, 0.026950868, 0.11111795, 0.07507298, -0.00898017, 0.038800135, -0.015267325,-0.13788238, -0.010740988, -0.00870569, -0.037743647, -0.12445874, 0.014676933, -0.27344462, -0.05738194,0.07101274, -0.023615673, 0.0040369336, -0.039028008, 0.06546099, -0.072421394, -0.17119291,-0.019358637, -0.05489608, 0.006199114, 0.0515816, 0.050048653, -0.034843475, 0.07847032, -0.13315925,0.025335059, -0.0432954";
}
/**
 * Test document serialized to/from Cosmos DB: an id, a partition key, free text,
 * the embedding vector, and the {@code VectorDistance} score projected by queries.
 */
static class Document {
    @JsonProperty("id")
    String id;
    @JsonProperty("pk")
    String pk;
    @JsonProperty("text")
    String text;
    @JsonProperty("embedding")
    double[] embedding;
    @JsonProperty("score")
    double score;

    /** No-arg constructor used by the JSON deserializer. */
    public Document() {}

    /** Creates a document to be written; {@code score} is only populated on reads. */
    public Document(String id, String pk, String text, double[] embedding) {
        this.id = id;
        this.pk = pk;
        this.text = text;
        this.embedding = embedding;
    }

    public String getId() {
        return id;
    }

    public double getScore() {
        return score;
    }
}
}
class NonStreamingOrderByQueryVectorSearchTest {
// Timeouts (ms) for test bodies, class setup, and class teardown.
protected static final int TIMEOUT = 30000;
protected static final int SETUP_TIMEOUT = 20000;
protected static final int SHUTDOWN_TIMEOUT = 20000;
protected static Logger logger = LoggerFactory.getLogger(NonStreamingOrderByQueryVectorSearchTest.class.getSimpleName());
// Unique database/container ids per run so concurrent executions do not collide.
private final String databaseId = CosmosDatabaseForTest.generateId();
private final String flatContainerId = "flat_" + UUID.randomUUID();
private final String quantizedContainerId = "quantized_" + UUID.randomUUID();
private final String largeDataContainerId = "large_data_" + UUID.randomUUID();
// Shared client/database/containers; presumably initialized in class setup (not
// visible in this chunk) and released in afterClass() — TODO confirm.
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer flatIndexContainer;
private CosmosAsyncContainer quantizedIndexContainer;
private CosmosAsyncContainer largeDataContainer;
// NOTE(review): @BeforeClass is stacked directly on afterClass(), which would make the
// teardown also run as class setup — the setup method body looks like it is missing
// from this chunk; confirm against the original source.
@BeforeClass(groups = {"query", "split"}, timeOut = SETUP_TIMEOUT)
@AfterClass(groups = {"query"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
// Best-effort cleanup: drop the per-run database, then close the async client.
safeDeleteDatabase(database);
safeClose(client);
}
@Test(groups = {"query"}, timeOut = TIMEOUT)
public void flatIndexVectorSearch() {
    // Issues three DISTINCT TOP-6 vector-similarity queries against the flat-index
    // container and verifies the projected VectorDistance scores come back ordered.
    String queryVector = getQueryVector();

    // Default distance function (no spec object supplied).
    // Renamed from snake_case `vanilla_query` for consistency with the camelCase
    // locals used by the sibling tests.
    String vanillaQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
        "score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
    List<Document> resultDocs = flatIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(6, resultDocs, false);

    // Explicit euclidean distance spec.
    // NOTE(review): validated with isEucledian=false (non-ascending scores) here,
    // while quantizedIndexVectorSearch validates its euclidean query ascending —
    // confirm which ordering is actually expected for the flat index.
    String euclideanSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'euclidean'}) AS " +
        "score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'})", queryVector, queryVector);
    resultDocs = flatIndexContainer.queryItems(euclideanSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(6, resultDocs, false);

    // Explicit dotproduct distance spec.
    String dotproductSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'dotproduct'}) AS " +
        "score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'dotproduct'})", queryVector, queryVector);
    resultDocs = flatIndexContainer.queryItems(dotproductSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(6, resultDocs, false);
}
@Test(groups = {"query"}, timeOut = TIMEOUT)
public void quantizedIndexVectorSearch() {
    // Same three DISTINCT TOP-6 similarity queries as flatIndexVectorSearch, but
    // against the quantized-index container.
    String queryVector = getQueryVector();

    // Default distance function; validated non-ascending.
    String vanillaQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
        "score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
    List<Document> resultDocs = quantizedIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(6, resultDocs, false);

    // Explicit euclidean distance spec; validated ascending (isEucledian=true).
    String euclideanSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'}) AS " +
        "score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'})", queryVector, queryVector);
    resultDocs = quantizedIndexContainer.queryItems(euclideanSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(6, resultDocs, true);

    // Explicit dotproduct distance spec.
    // Renamed from snake_case `dotproduct_specs_query` for consistency with the
    // camelCase locals used elsewhere in this class.
    String dotproductSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'dotproduct'}) AS " +
        "score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'dotproduct'})", queryVector, queryVector);
    resultDocs = quantizedIndexContainer.queryItems(dotproductSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(6, resultDocs, false);
}
@Test(groups = {"query"}, timeOut = TIMEOUT * 40)
public void largeDataVectorSearch() {
    // Seed 2000 documents whose 2-d embeddings are [0.0001*i, 0.0001*i] for
    // document i, spread across two partition key values ("0"/"1").
    double embeddingValue = 0.0001;
    for (int i = 1; i <= 2000; i++) {
        Document doc = new Document(String.valueOf(i), String.valueOf(i % 2), "text" + i, new double[]{embeddingValue, embeddingValue});
        largeDataContainer.createItem(doc).block();
        embeddingValue = 0.0001 * (i + 1);
    }
    // The six query shapes below were previously six copies of the same
    // query/drain/validate pipeline; extracted into a helper.
    // OFFSET/LIMIT, with and without DISTINCT.
    queryLargeDataContainerAndValidate("SELECT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
        " VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 0 LIMIT 1000", 1000);
    queryLargeDataContainerAndValidate("SELECT DISTINCT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
        " VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 0 LIMIT 1000", 1000);
    queryLargeDataContainerAndValidate("SELECT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
        " VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 1000 LIMIT 500", 500);
    queryLargeDataContainerAndValidate("SELECT DISTINCT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
        " VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 1000 LIMIT 500", 500);
    // TOP, with and without DISTINCT.
    queryLargeDataContainerAndValidate("SELECT TOP 1000 c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
        " VectorDistance(c.embedding, [0.0001, 0.0001])", 1000);
    queryLargeDataContainerAndValidate("SELECT DISTINCT TOP 1000 c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
        " VectorDistance(c.embedding, [0.0001, 0.0001])", 1000);
}

// Runs the query against the large-data container, drains every result page, and
// asserts the expected count plus non-ascending score ordering.
private void queryLargeDataContainerAndValidate(String query, int expectedCount) {
    List<Document> resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(expectedCount, resultDocs, false);
}
@Test(groups = {"split"}, timeOut = TIMEOUT * 40)
public void splitHandlingVectorSearch() throws Exception {
    // Verifies ordered vector-search results survive a physical partition split.
    // Capture the partition layout before triggering the split.
    AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client);
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(flatContainerId, asyncDocumentClient);
    String queryVector = getQueryVector();
    String vanillaQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
        "score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);

    // Baseline: ordered results before the split.
    List<Document> resultDocs = flatIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(6, resultDocs, false);

    // Raising manual throughput to 16000 RU/s forces the service to split the
    // container's physical partitions.
    logger.info("Scaling up throughput for split");
    ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
    ThroughputResponse throughputResponse = flatIndexContainer.replaceThroughput(throughputProperties).block();
    logger.info("Throughput replace request submitted for {} ",
        throughputResponse.getProperties().getManualThroughput());

    // Poll until the throughput replace (and hence the split) completes; the
    // @Test timeOut bounds this loop.
    throughputResponse = flatIndexContainer.readThroughput().block();
    while (true) {
        assert throughputResponse != null;
        if (!throughputResponse.isReplacePending()) {
            break;
        }
        logger.info("Waiting for split to complete");
        Thread.sleep(10 * 1000);
        throughputResponse = flatIndexContainer.readThroughput().block();
    }

    List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(flatContainerId, asyncDocumentClient);
    // Fix: the AssertJ description must be attached via as(...) BEFORE the assertion
    // call; previously it followed isGreaterThan(...) and therefore had no effect.
    assertThat(partitionKeyRangesAfterSplit.size())
        .as("Partition ranges should increase after split")
        .isGreaterThan(partitionKeyRanges.size());
    logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());

    // The same query must still produce correctly ordered results across the
    // increased number of partitions.
    resultDocs = flatIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(6, resultDocs, false);
}
/**
 * Reads every partition-key-range feed page for the given container and flattens
 * the pages into a single list.
 */
private List<PartitionKeyRange> getPartitionKeyRanges(
    String containerId, AsyncDocumentClient asyncDocumentClient) {
    List<FeedResponse<PartitionKeyRange>> feedPages = asyncDocumentClient
        .readPartitionKeyRanges("/dbs/" + database.getId()
            + "/colls/" + containerId,
            new CosmosQueryRequestOptions())
        .collectList().block();
    List<PartitionKeyRange> ranges = new ArrayList<>();
    for (FeedResponse<PartitionKeyRange> page : feedPages) {
        ranges.addAll(page.getResults());
    }
    return ranges;
}
/**
 * Asserts that exactly {@code top} documents were returned and that their scores
 * are monotonic: non-descending when {@code isEucledian} is true, non-ascending
 * otherwise.
 */
private void validateOrdering(int top, List<Document> docs, boolean isEucledian) {
    assertThat(docs.size()).isEqualTo(top);
    for (int i = 1; i < docs.size(); i++) {
        double previous = docs.get(i - 1).getScore();
        double current = docs.get(i).getScore();
        if (isEucledian) {
            assertThat(previous).isLessThanOrEqualTo(current);
        } else {
            assertThat(previous).isGreaterThanOrEqualTo(current);
        }
    }
}
/**
 * Builds a vector embedding policy for the "/embedding" path: FLOAT32 components,
 * cosine distance, and the supplied dimensionality.
 */
private CosmosVectorEmbeddingPolicy populateVectorEmbeddingPolicy(Long dimensions) {
    CosmosVectorEmbedding embeddingSpec = new CosmosVectorEmbedding();
    embeddingSpec.setPath("/embedding");
    embeddingSpec.setDataType(CosmosVectorDataType.FLOAT32);
    embeddingSpec.setDimensions(dimensions);
    embeddingSpec.setDistanceFunction(CosmosVectorDistanceFunction.COSINE);
    CosmosVectorEmbeddingPolicy embeddingPolicy = new CosmosVectorEmbeddingPolicy();
    embeddingPolicy.setCosmosVectorEmbeddings(Collections.singletonList(embeddingSpec));
    return embeddingPolicy;
}
/**
 * Builds a consistent indexing policy that includes all paths and adds a vector
 * index of the requested type on the "/embedding" path.
 */
private IndexingPolicy populateIndexingPolicy(CosmosVectorIndexType vectorIndexType) {
    CosmosVectorIndexSpec vectorIndexSpec = new CosmosVectorIndexSpec();
    vectorIndexSpec.setPath("/embedding");
    vectorIndexSpec.setType(vectorIndexType.toString());
    IndexingPolicy policy = new IndexingPolicy();
    policy.setIndexingMode(IndexingMode.CONSISTENT);
    policy.setIncludedPaths(Collections.singletonList(new IncludedPath("/*")));
    policy.setVectorIndexes(Collections.singletonList(vectorIndexSpec));
    return policy;
}
// Returns the ten fixed test documents ("item0".."item9"), alternating between two
// partition keys ("1"/"2"), each with a short text and its precomputed embedding.
public List<Document> getVectorDocs() {
List<Document> docs = new ArrayList<>();
docs.add(new Document("item0", "1", "Good morning!", new double[]{-0.008334724, -0.05993167, -0.0903545, -0.04791922, -0.01825805, -0.053011455, 0.120733805, 0.017714009, 0.07346743, 0.11559805, 0.03262076, 0.074512, 0.015864266, 0.01981401, 0.007850527, 0.076296456, -0.08539284, 0.016593281, -0.05423011, 0.07520837, 0.074250855, 0.056754466, -0.022098986, 0.03155444, 0.04334927, 0.024655985, -0.02109795, 0.044023883, -0.027550288, -0.11350893, -0.022806242, 0.08608921, 0.009221513, 0.06659074, 0.09087678, 0.024830079, 0.0075513036, 0.036472578, 0.015418151, 0.060497474, 0.010940685, -0.059365865, 0.043566886, 0.00427073, -0.023546139, 0.030357545, -0.03403527, 0.1182965, 0.1115939, -0.018954424, 0.0032452107, 0.10297628, 0.15328929, -0.016952349, -0.04530782, 0.04674409, -8.351895e-05, -0.19376601, -0.025091218, -0.03664667, -0.011860116, -0.10454312, -0.13109237, -0.049268447, 0.17557324, 0.044872586, 0.046787616, 0.15337633, -0.019509347, 0.0077743605, 0.04556896, -0.08413066, -0.028681897, 0.1209079, 0.1357929, -0.09314, 0.12534729, -0.065546185, 0.12212656, 0.04892026, 0.07394619, -0.08134516, -0.004493787, 0.08138869, 0.028573086, 0.12290998, -0.16477945, -0.29839617, -0.08090993, 0.12256179, 0.16591106, -0.08173688, -0.034383457, -0.1076768, -0.043022845, -0.07655759, 0.2021225, 0.03923631, 0.07703635, -0.08587159, 0.06498038, -0.08330371, 0.16486649, -0.14040637, 0.02070624, -0.069855, 0.052880887, 0.016136287, 0.00024294876, -0.19968519, 0.06933272, 0.013241983, 0.0004002109, 0.14998151, 0.07516485, 0.18610589, -0.07895138, -0.108982496, -0.03494926, -0.027637335, -0.032925423, -0.009509855, 0.1182965, -0.075513035, -0.08665501, 0.019629037, 0.2583547, 0.00983084}));
docs.add(new Document("item1", "2", "Good afternoon!", new double[]{-0.04136167, -0.047741424, -0.08737179, -3.0502639e-05, 0.054595508, -0.11298566, 0.0906921, 0.108052626, 0.04729081, 0.21287979, -0.06588459, -0.052793052, -0.05568647, 0.017384235, -0.02518698, 0.021665072, -0.19238868, 0.03379609, 0.0075359354, -0.04989963, 0.055449303, 0.03282371, 0.026064493, 0.042096883, -0.007192045, 0.0786441, -0.09012291, 0.094012424, -0.0031483725, -0.0318039, -0.034721036, 0.10255038, 0.08851018, 0.11327027, 0.031614166, 0.006035863, 0.021321183, 0.0703433, 0.14201473, 0.058674756, -0.007986549, 0.03329804, 0.08884221, -0.09505595, -0.015522485, -0.068161376, 0.072572656, 0.049235567, 0.2263508, 0.029574543, -0.050563693, 0.050516263, 0.04660303, 0.08376687, 0.017514676, 0.0706279, 0.007921329, -0.1353741, -0.054358345, -0.1628853, -0.097617336, -0.123326086, -0.0489984, -0.0693472, 0.15396787, 0.027368903, 0.06042978, 0.22862759, -0.016293272, 0.033582643, 0.06697555, -0.10302471, -0.12104929, 0.18034068, 0.068303674, 0.003468546, 0.07480201, -0.1204801, 0.06787678, -0.042926963, 0.15785739, -0.034531303, -0.028934196, 0.022933908, -0.012012435, 0.004248228, -0.19172463, -0.31837103, -0.099230066, 0.02071641, 0.18546346, 0.06299117, -0.047053643, -0.09206767, 0.049472734, -0.046365865, 0.18214314, 0.0051079527, 0.105680965, -0.026443958, 0.072714955, -0.08073115, 0.09429702, -0.113744594, 0.02092986, -0.013625161, -0.07390078, 0.020550394, 0.011787128, -0.13499463, 0.015107445, -0.0015223064, 0.040104695, 0.094961084, 0.13404597, 0.082486175, -0.07428025, -0.026847139, -0.025163265, -0.09818654, 0.010043961, 0.013601444, 0.077932596, 0.016056107, -0.026965722, 0.045606934, 0.085047565, -0.005878741}));
docs.add(new Document("item2", "1", "Great weather today!", new double[]{0.07557184, -0.047335204, -0.054325826, 0.011354052, 0.07698824, -0.025358144, -0.07927276, 0.1510523, 0.022251202, 0.026249107, -0.021725763, 0.062047496, 0.14556946, -0.0006071819, -0.107098185, 0.021394506, -0.13771072, -0.10527057, 0.115048304, -0.048842985, -0.059443146, 0.060585406, -0.014940745, 0.18075103, 0.053777542, -0.022057017, 0.08493836, 0.1198001, 0.041555375, -0.066525154, 0.054280136, 0.107646465, 0.023359193, -0.0046632714, -0.1017981, 0.04105278, 0.11376897, 0.016437104, 0.067393266, 0.0961325, -0.017727856, -0.03333111, -0.020492123, -0.041943744, 0.0031040881, -0.028053876, 0.053640474, -0.02215982, 0.08205987, 0.015100661, -0.17133881, -0.05843796, 0.0895074, -0.04064157, -0.10225501, 0.11367759, -0.03593546, -0.010999952, -0.14273666, -0.010383132, 0.026112035, -0.14200561, -0.0634639, -0.04029889, 0.07383561, 0.116784535, 0.0805064, 0.13241065, -0.100884296, -0.10216363, -0.030384084, -0.07657703, -0.06323545, -0.013101708, 0.15233164, -0.009880538, 0.16448526, -0.11148446, -0.05606206, -0.11587073, 0.084892675, -0.1397211, -0.04948265, 0.006470896, 0.015637523, 0.09051259, -0.12665366, -0.28583884, -0.16046451, 0.055833608, 0.100244634, 0.013535767, -0.07155109, -0.10051877, 0.08662891, -0.0729218, 0.15818, 0.025449526, 0.05053353, -0.015740326, -0.0767141, -0.056381892, 0.091517776, -0.075114936, -0.04621579, -0.040458806, 0.03947646, 0.10901718, 0.0076417113, -0.34487078, 0.024375802, -0.072419204, 0.053000808, 0.04057303, -0.08434439, 0.027185759, 0.03403931, -0.06332683, 0.051858548, 0.011993717, 0.10728095, -0.09887392, 0.07593736, -0.0025172527, 0.1089258, -0.048934367, 0.051264573, 0.017008234}));
docs.add(new Document("item3", "2", "Hope you're doing well.", new double[]{-0.016385807, 0.16914073, -0.062722616, 0.009645036, 0.06367876, -0.10871283, -0.034588274, 0.0038275379, 0.107661076, 0.069654614, 0.036404934, -0.037910853, 0.08872956, 0.14571536, -0.082227826, 0.034731694, -0.07405285, 0.050340638, -0.16569862, -0.20671692, -0.0834708, 0.0043175584, -0.017234378, 0.052683175, 0.013863994, 0.12142946, -0.002630872, 0.069702424, 0.095804974, -0.10020321, 0.0793116, 0.028970966, 0.13940485, 0.15814514, 0.11597948, -0.03795866, -0.018178564, 0.14753202, -0.10670494, -0.055455975, 0.058037546, -0.04457991, -0.0046014115, -0.018704439, 0.07902476, -0.07902476, 0.031026661, -0.017855868, 0.098769, 0.118847884, -0.051488005, 0.009017572, 0.15068726, 0.044962365, 0.18233542, -0.0006853563, 0.11205931, -0.2602606, 0.09848216, 0.08681728, -0.077638365, -0.008664995, 0.010314333, -0.0361659, 0.047185384, 0.09202823, 0.004359389, -0.008545479, -0.08815587, 0.0765388, 0.012262463, 0.0542608, -0.10813915, 0.11722245, 0.013744476, -0.08265808, 0.008055458, 0.12085578, 0.056364305, -0.12907855, -0.05311344, -0.060666922, 0.117031224, 0.029233903, -0.148775, 0.017879771, -0.081558526, -0.26332027, -0.22029407, -0.07410065, 0.0059340284, -0.11234615, -0.06898532, 0.046516087, 0.06798138, -0.049193274, -0.04687464, -0.049002044, -0.03145692, 0.0065614935, -0.021274058, -0.060188852, -0.04584679, -0.015979448, 0.08949447, -0.050197218, -0.051440194, -0.1341461, 0.08557431, -0.08261028, -0.104314595, -0.016134819, 0.057320442, -0.022421423, 0.012501498, 0.055503782, 0.020568907, -0.095183484, 0.0049450235, -0.03525757, 0.17688543, -0.06888971, -0.005694994, 0.05622088, -0.04250031, 0.050053798, 0.16063109, 0.06353533}));
docs.add(new Document("item4", "1", "Excuse me please.", new double[]{-0.19188246, 0.018719073, -0.032395326, 0.09734556, 0.021671357, -0.11751684, -0.078514785, 0.16507255, -0.0012956136, 0.117006175, -0.065492816, 0.106282204, -0.009750514, -0.006008296, 0.021799022, 0.04643862, -0.046023704, -0.023442727, 0.12868765, -0.1466886, -0.085089594, -0.046885453, -0.0067742937, 0.048162118, -0.04739612, 0.021687314, -0.025581138, 0.04841745, -0.10519704, -0.039129723, -0.09747323, 0.10532471, 0.04375763, 0.09536674, 0.0145938555, -0.0060681393, 0.26171595, 0.1815415, -0.03833181, 0.012487361, -0.027192924, -0.12281499, 0.017937116, -0.02173519, 0.07308897, -0.06913131, 0.07417413, -0.01884674, 0.049023863, -0.049949445, 0.081068106, 0.22060739, -0.031645287, -0.024735348, -0.041108552, 0.1823075, -0.06230116, -0.119048834, -0.07813178, -0.0841321, -0.007711843, 0.039576557, -0.07589762, 0.028198296, 0.003087929, 0.047970615, 0.0845151, 0.08208944, 0.07423796, 0.01259907, 0.00046179298, 0.024671515, 0.10302671, 0.12160216, 0.1353263, -0.16251922, 0.069195144, -0.09160058, 0.033320908, -0.06341824, -0.06402466, -0.048864283, -0.10053722, -0.019341446, 0.027033342, -0.19354212, -0.011146865, -0.31329313, 0.054513514, -0.0098861605, 0.10277138, 0.059237167, 0.021495815, -0.0704718, 0.14285861, 0.042672466, 0.057769008, 0.054353934, -0.041363884, 0.07819562, 0.1085802, -0.0047874865, 0.0035626881, 0.025405597, 0.0032953867, 0.13430496, -0.084451266, -0.10883553, 0.115601845, -0.072259136, -0.06976964, -0.1081972, 0.08515343, 0.044715125, 0.05725834, -0.06759931, -0.0421618, -0.06185433, -0.068939805, -0.13673063, -0.032874074, -0.121538326, -0.010157451, -0.048608948, 0.049949445, 0.031310163, 0.13238996, 0.06855681}));
docs.add(new Document("item5", "2", "Is this the right place?", new double[]{-0.05695127, 0.07729321, -0.07253956, 0.054049686, -0.084886715, -0.1677979, -0.020681491, -0.010765179, -0.05312365, 0.10964277, -0.1724898, -0.0139754405, -0.019446775, -0.009877727, 0.10902541, 0.06599557, -0.20224646, -0.008658445, -0.11698933, -0.00034678154, 0.059760246, 0.023660243, 0.014523345, 0.058340326, -0.116927594, -0.0011546522, 0.035991967, 0.017857078, -0.21261807, -0.07568809, -0.007250097, 0.09525833, 0.073033445, -0.078157514, -0.14816591, -0.089578636, -0.006030815, 0.08519539, 0.059852853, 0.12328638, 0.08544234, -0.017656436, -0.03901702, 0.036238912, -0.09482618, 0.007215371, 0.15742627, 0.014183799, 0.107914165, 0.014245534, -0.011907292, 0.025188204, 0.057630364, -0.057321683, 0.0024366346, 0.034695517, 0.11766842, -0.16520499, 0.065193, 0.10822285, -0.06834152, -0.048925616, -0.078836605, 0.05161112, 0.07235435, 0.07636718, -0.075996764, 0.13902901, 0.023860885, 0.07846619, 0.02665443, -0.026870504, -0.0084115015, 0.07550287, 0.07500899, -0.07395948, 0.05062335, 0.05621044, -0.031531557, -0.001280053, 0.06908235, 0.078280985, -0.060068928, 0.14236274, 0.14236274, 0.06358787, -0.042474225, -0.38053942, 0.062785305, 0.050870296, 0.038677476, -0.078157514, 0.029309068, -0.07809578, 0.07439163, -0.06772417, 0.11896487, 0.073589064, 0.05238282, 0.018196626, 0.116371974, 0.0033067234, -0.020264775, -0.006304768, -0.10686466, 0.08408415, -0.04386328, -0.0068681072, 0.08994905, -0.059513304, 0.021823604, 0.049419504, -0.012817894, -0.093221046, 0.02802805, 0.089578636, 0.07124311, -0.26694557, 0.024833223, -0.03802925, -0.18483697, -0.1103836, 0.11877967, 0.07000839, -0.018860284, -0.044974525, -0.034016423, 0.022780508}));
docs.add(new Document("item6", "1", "Awful weather today.", new double[]{0.002305239, -0.02629875, 0.009952777, 0.026884208, -0.0067561795, -0.1118458, 0.07432968, 0.0999493, 0.02177902, -0.047726493, 0.042059265, 0.1583077, 0.11175212, 0.07685886, -0.14060347, 0.005084698, -0.19277944, -0.05606341, 0.082619764, -0.04241054, 0.09620237, 0.011832096, -0.023301208, 0.28214368, 0.02451896, 0.02793803, 0.03358184, 0.044424515, 0.11006601, 0.038101573, 0.0077982936, 0.045572013, 0.075922124, 0.016369391, -0.0039986745, 0.22781321, 0.062854715, 0.048054352, 0.010251361, 0.07170683, -0.018816603, 0.027329156, -0.08482108, -0.079481706, 0.042785235, 0.024120849, 0.17413847, 0.035431888, 0.12439801, 0.10641275, -0.14828467, -0.024425287, 0.083415985, 0.1184966, 0.0026799317, 0.15399873, -0.010304051, 0.009742012, -0.10781785, -0.019472316, 0.061777476, -0.09798217, 0.028804509, -0.023371464, 0.015491205, 0.07521958, 0.024003757, 0.013465522, -0.089692086, -0.097794816, 0.021193562, -0.0592483, -0.056438103, -0.10987866, 0.11802823, -0.06440032, 0.07704621, -0.040138967, -0.13891736, -0.16027485, 0.08631986, -0.16786237, -0.085757814, 0.015491205, -0.013243048, 0.09133137, -0.16196096, -0.2313728, -0.083134964, 0.12308659, 0.07559427, 0.09723278, 0.048663225, -0.13339064, 0.016345974, -0.13189186, 0.11025336, 0.03346475, -0.00688498, -0.059435643, 0.0056877197, 0.014999421, 0.063650936, -0.15053283, -0.017481761, 0.05194179, 0.061402783, 0.0077924393, 0.19971126, -0.07713988, 0.06425981, 0.0021369199, 0.12158781, -0.024331613, -0.008571098, 0.03264511, -0.0020242194, -0.05507984, -0.00805004, -0.03304322, 0.050630365, -0.1475353, -0.003735219, -0.0202217, 0.16271034, -0.059435643, 0.06023187, 0.06660164}));
docs.add(new Document("item7", "2", "Dinosaurs were huge.", new double[]{0.09842033, -0.025395654, -0.03831241, 0.037825905, 0.17008278, -0.022269849, -0.05935383, 0.032668933, -0.022148222, 0.08178179, -0.062710725, 0.164342, -0.024021273, -0.052640036, 0.027366007, 0.18098053, -0.18487258, -0.008903074, 0.16346629, 0.009407825, 0.110339746, -0.046315446, 0.08046822, 0.12512955, -0.06635953, 0.0070239417, 0.055753678, -0.0005336371, -0.012326866, 0.1578228, -0.023668556, -0.035904203, 0.05988899, -0.08032227, -0.11520481, 0.08499274, 0.05093726, -0.09248494, -0.011128843, -0.07064079, 0.01466818, 0.082073696, -0.040574666, -0.07292737, 0.03539337, 0.05025615, -0.1145237, 0.09584184, 0.04186391, -0.034833886, -0.18234275, -0.090441614, -0.09194978, -0.031331036, -0.13106494, 0.068208255, 0.03220675, -0.031233737, -0.04337208, 0.1289243, 0.1363192, 0.052494083, 0.03074723, -0.000113359885, 0.07681943, 0.03962598, 0.016529068, -0.04191256, -0.03612313, 0.023084749, 0.10917213, 0.09477153, -0.09652295, -0.0999285, 0.11685894, -0.012649177, 0.043688312, -0.10333405, -0.060521446, -0.042034186, 0.0483831, 0.0028141378, -0.17270991, 0.05424551, 0.25570798, 0.09511208, -0.08504139, -0.1570444, -0.084262975, -0.13291366, -0.023741532, -0.14857918, 0.09190113, -0.08041958, -0.019837314, 0.09569589, -0.053029243, -0.030722905, -0.05239678, -0.15091442, -0.05872137, -0.056045584, 0.11831845, -0.1145237, -0.08761988, -0.0035727844, -0.05570503, 0.19285129, 0.011761302, 0.087717175, 0.107712604, -0.11277228, -0.042423394, 0.13048112, -0.03356897, 0.056775343, 0.08640361, -0.11831845, -0.10868562, 0.0410855, 0.12036178, -0.09477153, -0.017611546, 0.0075043673, -0.12668636, -0.006391483, 0.0012185475, -0.05161837}));
docs.add(new Document("item8", "1", "The hero saves the day again.", new double[]{0.11208976, 0.058739875, -0.017022463, 0.15080968, -0.0031057745, -0.048069898, -0.059069872, 0.09861479, 0.11626975, 0.12935972, 0.008882481, 0.08766981, -0.00940498, -0.077164836, 0.0015795279, 0.06297486, -0.06313986, 0.0931698, -0.17247963, -0.12077974, -0.044797402, -0.1404697, -0.050077394, 0.020432455, 0.07897983, 0.022632452, -0.046914898, 0.0031212433, -0.059399873, 0.03321993, -0.07237984, 0.10119978, 0.19612958, -0.086349815, 0.038252417, 0.084094815, 0.16257966, 0.15520966, 0.063304864, -0.08761481, 0.06132487, -0.08486482, -0.09789979, 0.06935485, -0.04160741, 0.05585238, -0.13485971, 0.105544776, -0.08084983, 0.103344776, 0.053817384, -0.0923998, -0.052607387, 0.015537467, 0.025299946, -0.05961987, 0.08090483, -0.25453946, 0.01894746, -0.026344944, -0.14552969, 0.014341219, 0.14409968, 0.12264974, -0.0456224, -0.13529971, -0.0466674, 0.006166862, 0.02776119, -0.020184956, 0.093939796, 0.054779883, -0.09635979, -0.016334964, -0.029177437, -0.06863985, 0.19139959, -0.08794481, -0.08464482, -0.013413096, 0.022109952, -0.122099735, -0.0458149, -0.019676207, 0.10785477, -0.109119765, -0.09690979, -0.28423938, 0.050709892, 0.12803972, 0.10620477, 0.12110974, -0.006658423, -0.052304886, -0.07798983, -0.035667423, -0.07507484, 0.02745869, 0.07237984, -0.0230862, -0.03684992, -0.067539856, -0.052387387, -0.05202989, 0.14244969, 0.080684826, -0.038472418, 0.112639755, 0.03242243, -0.07501984, 0.10631477, -0.024076197, -0.07754983, 0.06610986, -0.12671973, -0.044082403, 0.006001862, 0.037454918, 0.054504883, -0.03679492, 0.076669835, 0.02271495, 0.14794968, 0.06440486, -0.006850923, -0.06984985, 0.035639923, -0.009143731}));
docs.add(new Document("item9", "2", "Don't worry about it.", new double[]{-0.051598575, 0.22204931, -0.017881807, 0.11678282, 0.18426134, -0.03713568, -0.016847137, 0.06549915, 0.057626653, 0.032569632, 0.00076827104, -0.04489571, -0.07530603, 0.10778569, 0.030477798, 0.050338972, -0.21053298, -0.04341118, -0.097708896, -0.13432723, 0.1438642, 0.059606023, -0.12299085, -0.036820777, -0.026699001, 0.18381149, -0.02861089, 0.08259371, -0.14962237, -0.07373153, 0.02321261, 0.085607745, -0.13810603, 0.065139264, -0.12685962, 0.14098512, 0.17112552, -0.035921063, -0.008536032, -0.09680918, 0.04435588, -0.16086878, -0.035718627, 0.09689915, -0.0007956842, 0.034256592, -0.00234769, 0.04577293, 0.06725359, 0.015958669, -0.06486935, 0.124160476, 0.09887852, -0.050518915, -0.07080746, -0.078859895, 0.17013584, -0.22078972, -0.10103783, 0.06873812, 0.0370682, 0.04563797, -0.060235824, -0.056816913, -0.064689405, 0.11273411, 0.16572724, 0.108415484, 0.07921978, 0.05569227, -0.11210431, -0.05848138, -0.008361713, 0.07458626, -0.08992637, -0.07557594, -0.0020102975, -0.07080746, -0.0092614265, 0.06626391, -0.05848138, -0.078545, 0.08362838, -0.031737395, -0.047549862, -0.15367107, 0.093930095, -0.087182246, 0.06401462, 0.09006133, 0.10886534, 0.013338254, 0.025551865, -0.027553728, 0.14206477, -0.09060115, -0.07818511, -0.001209693, 0.017893054, -0.069367915, 0.0709874, 0.050249003, -0.13396735, -0.056681953, -0.022166694, -0.02170559, 0.08277365, -0.101667635, 0.09096104, 0.049529232, -0.095819496, -0.08974643, 0.054477658, -0.037967913, -0.08682236, 0.077690266, 0.03828281, -0.04136433, -0.14431405, -0.060505737, 0.025619343, -0.019400073, 0.11075474, 0.066893704, 0.07494614, 0.03684327, 0.03929499, -0.017353225}));
return docs;
}
/**
 * Returns the 128-dimension query vector used by the flat/quantized index tests,
 * as a comma-separated numeric literal that callers splice into the SQL
 * {@code VectorDistance(c.embedding, [...])} array syntax.
 * NOTE(review): some separators are "," without a trailing space — the query
 * parser accepts both forms, but confirm before reformatting this literal.
 */
public String getQueryVector() {
return "0.13481648, 0.022579897, -0.038054377, 0.035029914, 0.15404047, -0.012947189, 0.013434003, 0.0328755,0.0859279, 0.090071, 0.07391291, 0.10896354, 0.04085097, 0.019876525, 0.013806882, 0.03799223,-0.097528584, -0.10324606, -0.07863604, -0.01846787, -0.0018139011, 0.09686569, 0.0065512774,0.014107257, 0.0004389097, 0.07432722, 0.012698603, 0.09404838, 0.11592395, -0.08290344, -0.023802113,0.0771031, 0.15578057, 0.17152436, 0.06732538, 0.04408259, 0.04163816, 0.03196402, 0.08451925,0.05365315, -0.10473758, 0.054730356, -0.0686926, 0.12412729, 0.1910798, -0.048971448, -0.007649199,-0.059122045, -0.005241022, 0.021440545, -0.1014231, 0.08282058, -0.057671957, -0.024755025, -0.07619162,0.0966171, 0.047272775, -0.20384054, -0.024319999, -0.014739079, 0.03681145, -0.03331053, 0.09172824,-0.09744572, 0.08584504, 0.13440217, -0.019234344, 0.19605151, 0.030389642, -0.0646738, 0.08447782,-0.107969195, -0.19058262, -0.05369458, 0.071675636, -0.095871344, 0.030141056, 0.025107188, 0.06309942,-0.09951727, 0.031446133, -0.07888463, -0.08182623, -0.063762315, -0.0955399, -0.03948375, -0.031073254,-0.29979473, -0.06711823, 0.07743455, 0.0074161496, 0.01248109, -0.0812462, -0.0676154, 3.0668652e-05,-0.17931339, 0.2008575, 0.026950868, 0.11111795, 0.07507298, -0.00898017, 0.038800135, -0.015267325,-0.13788238, -0.010740988, -0.00870569, -0.037743647, -0.12445874, 0.014676933, -0.27344462, -0.05738194,0.07101274, -0.023615673, 0.0040369336, -0.039028008, 0.06546099, -0.072421394, -0.17119291,-0.019358637, -0.05489608, 0.006199114, 0.0515816, 0.050048653, -0.034843475, 0.07847032, -0.13315925,0.025335059, -0.0432954";
}
// Minimal POJO used both to seed test data and to deserialize query projections.
// Jackson binds fields via @JsonProperty; "score" is populated only by queries
// that project "VectorDistance(...) AS score".
static class Document {
@JsonProperty("id")
String id;
// Partition-key value (the containers are partitioned on "/pk").
@JsonProperty("pk")
String pk;
@JsonProperty("text")
String text;
// Embedding vector; its length must match the container's vector embedding policy.
@JsonProperty("embedding")
double[] embedding;
// Similarity score projected by the ORDER BY VectorDistance(...) queries.
@JsonProperty("score")
double score;
public Document(String id, String pk, String text, double[] embedding) {
this.id = id;
this.pk = pk;
this.text = text;
this.embedding = embedding;
}
// No-args constructor required by Jackson deserialization.
public Document() {
}
public String getId() {
return id;
}
public double getScore() {
return score;
}
}
} |
Added tests with multi-partitions as well | public void before_NonStreamingOrderByQueryVectorSearchTest() {
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.directMode(DirectConnectionConfig.getDefaultConfig())
.consistencyLevel(ConsistencyLevel.SESSION)
.contentResponseOnWriteEnabled(true)
.buildAsyncClient();
database = createDatabase(client, databaseId);
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/pk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties containerProperties = new CosmosContainerProperties(flatContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(128L));
database.createContainer(containerProperties).block();
flatIndexContainer = database.getContainer(flatContainerId);
containerProperties = new CosmosContainerProperties(quantizedContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.QUANTIZED_FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(128L));
database.createContainer(containerProperties).block();
quantizedIndexContainer = database.getContainer(quantizedContainerId);
containerProperties = new CosmosContainerProperties(largeDataContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.QUANTIZED_FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(2L));
database.createContainer(containerProperties).block();
largeDataContainer = database.getContainer(largeDataContainerId);
for (Document doc: getVectorDocs()) {
flatIndexContainer.createItem(doc).block();
quantizedIndexContainer.createItem(doc).block();
}
} | database.createContainer(containerProperties).block(); | public void before_NonStreamingOrderByQueryVectorSearchTest() {
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.directMode(DirectConnectionConfig.getDefaultConfig())
.consistencyLevel(ConsistencyLevel.SESSION)
.contentResponseOnWriteEnabled(true)
.buildAsyncClient();
database = createDatabase(client, databaseId);
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<String>();
paths.add("/pk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties containerProperties = new CosmosContainerProperties(flatContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(128L));
database.createContainer(containerProperties).block();
flatIndexContainer = database.getContainer(flatContainerId);
containerProperties = new CosmosContainerProperties(quantizedContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.QUANTIZED_FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(128L));
database.createContainer(containerProperties, ThroughputProperties.createManualThroughput(20000)).block();
quantizedIndexContainer = database.getContainer(quantizedContainerId);
containerProperties = new CosmosContainerProperties(largeDataContainerId, partitionKeyDef);
containerProperties.setIndexingPolicy(populateIndexingPolicy(CosmosVectorIndexType.QUANTIZED_FLAT));
containerProperties.setVectorEmbeddingPolicy(populateVectorEmbeddingPolicy(2L));
database.createContainer(containerProperties).block();
largeDataContainer = database.getContainer(largeDataContainerId);
for (Document doc : getVectorDocs()) {
flatIndexContainer.createItem(doc).block();
quantizedIndexContainer.createItem(doc).block();
}
} | class NonStreamingOrderByQueryVectorSearchTest {
// Timeouts (milliseconds) referenced by the TestNG annotations in this class.
protected static final int TIMEOUT = 30000;
protected static final int SETUP_TIMEOUT = 20000;
protected static final int SHUTDOWN_TIMEOUT = 20000;
protected static Logger logger = LoggerFactory.getLogger(NonStreamingOrderByQueryVectorSearchTest.class.getSimpleName());
// Per-run unique database/container ids so concurrent test runs do not collide.
private final String databaseId = CosmosDatabaseForTest.generateId();
private final String flatContainerId = "flat_" + UUID.randomUUID();
private final String quantizedContainerId = "quantized_" + UUID.randomUUID();
private final String largeDataContainerId = "large_data_" + UUID.randomUUID();
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
// Containers exercising the FLAT and QUANTIZED_FLAT vector index types, plus a
// 2-dimension container used by the 2000-document large-data test.
private CosmosAsyncContainer flatIndexContainer;
private CosmosAsyncContainer quantizedIndexContainer;
private CosmosAsyncContainer largeDataContainer;
// NOTE(review): this @BeforeClass annotation is not followed by a method here —
// presumably it belongs on before_NonStreamingOrderByQueryVectorSearchTest(); verify
// placement, otherwise it stacks onto afterClass() below.
@BeforeClass(groups = {"query"}, timeOut = SETUP_TIMEOUT)
// Suite teardown: drops the test database and closes the async client.
@AfterClass(groups = {"query"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeDeleteDatabase(database);
safeClose(client);
}
// Verifies TOP-N vector search against the FLAT-indexed container with the default
// distance function and with explicit euclidean/dotproduct specs, asserting the
// projected scores come back in order.
@Test(groups = {"query"}, timeOut = TIMEOUT)
public void flatIndexVectorSearch() {
String queryVector = getQueryVector();
// Default distance function (the embedding policy declares cosine).
String vanilla_query = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
List<Document> resultDocs = flatIndexContainer.queryItems(vanilla_query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
// Explicit euclidean distance spec.
String euclideanSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'euclidean'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'})", queryVector, queryVector);
resultDocs = flatIndexContainer.queryItems(euclideanSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
// NOTE(review): quantizedIndexVectorSearch validates its euclidean query with
// ascending ordering (true) while this call uses descending (false) — confirm
// the intended ordering for euclidean distance here.
validateOrdering(6, resultDocs, false);
// Explicit dotproduct distance spec.
String dotproductSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'dotproduct'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'dotproduct'})", queryVector, queryVector);
resultDocs = flatIndexContainer.queryItems(dotproductSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
}
// Same TOP-N vector-search matrix as flatIndexVectorSearch, but against the
// QUANTIZED_FLAT-indexed container.
@Test(groups = {"query"}, timeOut = TIMEOUT)
public void quantizedIndexVectorSearch() {
String queryVector = getQueryVector();
// Default distance function (cosine per the embedding policy); higher score = closer.
String vanillaQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
List<Document> resultDocs = quantizedIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
// Euclidean distance: smaller is closer, so scores are validated ascending (true).
String euclideanSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'})", queryVector, queryVector);
resultDocs = quantizedIndexContainer.queryItems(euclideanSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, true);
// Dotproduct distance spec. (Variable name is snake_case unlike its siblings.)
String dotproduct_specs_query = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'dotproduct'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'dotproduct'})", queryVector, queryVector);
resultDocs = quantizedIndexContainer.queryItems(dotproduct_specs_query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
}
/**
 * Seeds 2000 documents (ids 1..2000, pk alternating between "0" and "1" via i % 2)
 * whose 2-dimension embeddings grow monotonically with the id, then verifies the
 * OFFSET/LIMIT, TOP, and DISTINCT variants of the vector ORDER BY query each
 * return the expected number of results in score order.
 * <p>
 * Improvements over the previous revision: the six {@code String.format} calls had
 * no format specifiers (pure no-ops, flagged by Error Prone) and the six identical
 * query/collect/validate pipelines are now factored into one private helper.
 */
@Test(groups = {"query"}, timeOut = TIMEOUT * 1000)
public void largeDataVectorSearch() {
    // Document i gets embedding [0.0001 * i, 0.0001 * i] — identical to the old
    // carry-forward "embeddingValue" computation, just evaluated per iteration.
    for (int i = 1; i <= 2000; i++) {
        double embeddingValue = 0.0001 * i;
        Document doc = new Document(String.valueOf(i), String.valueOf(i % 2), "text" + i,
            new double[] {embeddingValue, embeddingValue});
        largeDataContainer.createItem(doc).block();
    }
    // OFFSET/LIMIT paging, with and without DISTINCT.
    runLargeDataQueryAndValidate(
        "SELECT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
            " VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 0 LIMIT 1000", 1000);
    runLargeDataQueryAndValidate(
        "SELECT DISTINCT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
            " VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 0 LIMIT 1000", 1000);
    runLargeDataQueryAndValidate(
        "SELECT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
            " VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 1000 LIMIT 500", 500);
    runLargeDataQueryAndValidate(
        "SELECT DISTINCT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
            " VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 1000 LIMIT 500", 500);
    // TOP, with and without DISTINCT.
    runLargeDataQueryAndValidate(
        "SELECT TOP 1000 c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
            " VectorDistance(c.embedding, [0.0001, 0.0001])", 1000);
    runLargeDataQueryAndValidate(
        "SELECT DISTINCT TOP 1000 c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
            " VectorDistance(c.embedding, [0.0001, 0.0001])", 1000);
}

/** Runs the query against the large-data container and asserts result count and score ordering. */
private void runLargeDataQueryAndValidate(String query, int expectedCount) {
    List<Document> resultDocs = largeDataContainer
        .queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
        .flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
        .collectList().block();
    validateOrdering(expectedCount, resultDocs, false);
}
/**
 * Asserts that exactly {@code top} documents were returned and that their projected
 * scores are monotonic: non-decreasing when {@code isEucledian} is true (smaller
 * euclidean distance = closer), non-increasing otherwise.
 */
private void validateOrdering(int top, List<Document> docs, boolean isEucledian) {
    assertThat(docs.size()).isEqualTo(top);
    for (int i = 1; i < docs.size(); i++) {
        double previous = docs.get(i - 1).getScore();
        double current = docs.get(i).getScore();
        if (isEucledian) {
            assertThat(previous).isLessThanOrEqualTo(current);
        } else {
            assertThat(previous).isGreaterThanOrEqualTo(current);
        }
    }
}
/**
 * Builds a vector embedding policy for the "/embedding" path: FLOAT32 components,
 * cosine distance, and the supplied dimensionality.
 */
private CosmosVectorEmbeddingPolicy populateVectorEmbeddingPolicy(Long dimensions) {
    CosmosVectorEmbedding embedding = new CosmosVectorEmbedding();
    embedding.setPath("/embedding");
    embedding.setDimensions(dimensions);
    embedding.setDataType(CosmosVectorDataType.FLOAT32);
    embedding.setDistanceFunction(CosmosVectorDistanceFunction.COSINE);

    CosmosVectorEmbeddingPolicy embeddingPolicy = new CosmosVectorEmbeddingPolicy();
    embeddingPolicy.setCosmosVectorEmbeddings(Collections.singletonList(embedding));
    return embeddingPolicy;
}
/**
 * Builds a consistent-mode indexing policy that indexes every path and attaches a
 * vector index of the requested type on "/embedding".
 */
private IndexingPolicy populateIndexingPolicy(CosmosVectorIndexType vectorIndexType) {
    CosmosVectorIndexSpec vectorIndexSpec = new CosmosVectorIndexSpec();
    vectorIndexSpec.setPath("/embedding");
    vectorIndexSpec.setType(vectorIndexType.toString());

    IndexingPolicy policy = new IndexingPolicy();
    policy.setIndexingMode(IndexingMode.CONSISTENT);
    policy.setIncludedPaths(Collections.singletonList(new IncludedPath("/*")));
    policy.setVectorIndexes(Collections.singletonList(vectorIndexSpec));
    return policy;
}
/**
 * Returns the ten fixed seed documents (ids item0..item9, pk alternating "1"/"2")
 * with precomputed 128-dimension embeddings, used to populate the flat and
 * quantized containers during suite setup.
 * NOTE(review): the embedding arrays are generated data — do not hand-edit; their
 * length must stay in sync with populateVectorEmbeddingPolicy(128L).
 */
public List<Document> getVectorDocs() {
List<Document> docs = new ArrayList<>();
docs.add(new Document("item0", "1", "Good morning!", new double[] {-0.008334724, -0.05993167, -0.0903545, -0.04791922, -0.01825805, -0.053011455, 0.120733805, 0.017714009, 0.07346743, 0.11559805, 0.03262076, 0.074512, 0.015864266, 0.01981401, 0.007850527, 0.076296456, -0.08539284, 0.016593281, -0.05423011, 0.07520837, 0.074250855, 0.056754466, -0.022098986, 0.03155444, 0.04334927, 0.024655985, -0.02109795, 0.044023883, -0.027550288, -0.11350893, -0.022806242, 0.08608921, 0.009221513, 0.06659074, 0.09087678, 0.024830079, 0.0075513036, 0.036472578, 0.015418151, 0.060497474, 0.010940685, -0.059365865, 0.043566886, 0.00427073, -0.023546139, 0.030357545, -0.03403527, 0.1182965, 0.1115939, -0.018954424, 0.0032452107, 0.10297628, 0.15328929, -0.016952349, -0.04530782, 0.04674409, -8.351895e-05, -0.19376601, -0.025091218, -0.03664667, -0.011860116, -0.10454312, -0.13109237, -0.049268447, 0.17557324, 0.044872586, 0.046787616, 0.15337633, -0.019509347, 0.0077743605, 0.04556896, -0.08413066, -0.028681897, 0.1209079, 0.1357929, -0.09314, 0.12534729, -0.065546185, 0.12212656, 0.04892026, 0.07394619, -0.08134516, -0.004493787, 0.08138869, 0.028573086, 0.12290998, -0.16477945, -0.29839617, -0.08090993, 0.12256179, 0.16591106, -0.08173688, -0.034383457, -0.1076768, -0.043022845, -0.07655759, 0.2021225, 0.03923631, 0.07703635, -0.08587159, 0.06498038, -0.08330371, 0.16486649, -0.14040637, 0.02070624, -0.069855, 0.052880887, 0.016136287, 0.00024294876, -0.19968519, 0.06933272, 0.013241983, 0.0004002109, 0.14998151, 0.07516485, 0.18610589, -0.07895138, -0.108982496, -0.03494926, -0.027637335, -0.032925423, -0.009509855, 0.1182965, -0.075513035, -0.08665501, 0.019629037, 0.2583547, 0.00983084}));
docs.add(new Document("item1", "2", "Good afternoon!", new double[] {-0.04136167, -0.047741424, -0.08737179, -3.0502639e-05, 0.054595508, -0.11298566, 0.0906921, 0.108052626, 0.04729081, 0.21287979, -0.06588459, -0.052793052, -0.05568647, 0.017384235, -0.02518698, 0.021665072, -0.19238868, 0.03379609, 0.0075359354, -0.04989963, 0.055449303, 0.03282371, 0.026064493, 0.042096883, -0.007192045, 0.0786441, -0.09012291, 0.094012424, -0.0031483725, -0.0318039, -0.034721036, 0.10255038, 0.08851018, 0.11327027, 0.031614166, 0.006035863, 0.021321183, 0.0703433, 0.14201473, 0.058674756, -0.007986549, 0.03329804, 0.08884221, -0.09505595, -0.015522485, -0.068161376, 0.072572656, 0.049235567, 0.2263508, 0.029574543, -0.050563693, 0.050516263, 0.04660303, 0.08376687, 0.017514676, 0.0706279, 0.007921329, -0.1353741, -0.054358345, -0.1628853, -0.097617336, -0.123326086, -0.0489984, -0.0693472, 0.15396787, 0.027368903, 0.06042978, 0.22862759, -0.016293272, 0.033582643, 0.06697555, -0.10302471, -0.12104929, 0.18034068, 0.068303674, 0.003468546, 0.07480201, -0.1204801, 0.06787678, -0.042926963, 0.15785739, -0.034531303, -0.028934196, 0.022933908, -0.012012435, 0.004248228, -0.19172463, -0.31837103, -0.099230066, 0.02071641, 0.18546346, 0.06299117, -0.047053643, -0.09206767, 0.049472734, -0.046365865, 0.18214314, 0.0051079527, 0.105680965, -0.026443958, 0.072714955, -0.08073115, 0.09429702, -0.113744594, 0.02092986, -0.013625161, -0.07390078, 0.020550394, 0.011787128, -0.13499463, 0.015107445, -0.0015223064, 0.040104695, 0.094961084, 0.13404597, 0.082486175, -0.07428025, -0.026847139, -0.025163265, -0.09818654, 0.010043961, 0.013601444, 0.077932596, 0.016056107, -0.026965722, 0.045606934, 0.085047565, -0.005878741}));
docs.add(new Document("item2", "1", "Great weather today!", new double[] {0.07557184, -0.047335204, -0.054325826, 0.011354052, 0.07698824, -0.025358144, -0.07927276, 0.1510523, 0.022251202, 0.026249107, -0.021725763, 0.062047496, 0.14556946, -0.0006071819, -0.107098185, 0.021394506, -0.13771072, -0.10527057, 0.115048304, -0.048842985, -0.059443146, 0.060585406, -0.014940745, 0.18075103, 0.053777542, -0.022057017, 0.08493836, 0.1198001, 0.041555375, -0.066525154, 0.054280136, 0.107646465, 0.023359193, -0.0046632714, -0.1017981, 0.04105278, 0.11376897, 0.016437104, 0.067393266, 0.0961325, -0.017727856, -0.03333111, -0.020492123, -0.041943744, 0.0031040881, -0.028053876, 0.053640474, -0.02215982, 0.08205987, 0.015100661, -0.17133881, -0.05843796, 0.0895074, -0.04064157, -0.10225501, 0.11367759, -0.03593546, -0.010999952, -0.14273666, -0.010383132, 0.026112035, -0.14200561, -0.0634639, -0.04029889, 0.07383561, 0.116784535, 0.0805064, 0.13241065, -0.100884296, -0.10216363, -0.030384084, -0.07657703, -0.06323545, -0.013101708, 0.15233164, -0.009880538, 0.16448526, -0.11148446, -0.05606206, -0.11587073, 0.084892675, -0.1397211, -0.04948265, 0.006470896, 0.015637523, 0.09051259, -0.12665366, -0.28583884, -0.16046451, 0.055833608, 0.100244634, 0.013535767, -0.07155109, -0.10051877, 0.08662891, -0.0729218, 0.15818, 0.025449526, 0.05053353, -0.015740326, -0.0767141, -0.056381892, 0.091517776, -0.075114936, -0.04621579, -0.040458806, 0.03947646, 0.10901718, 0.0076417113, -0.34487078, 0.024375802, -0.072419204, 0.053000808, 0.04057303, -0.08434439, 0.027185759, 0.03403931, -0.06332683, 0.051858548, 0.011993717, 0.10728095, -0.09887392, 0.07593736, -0.0025172527, 0.1089258, -0.048934367, 0.051264573, 0.017008234}));
docs.add(new Document("item3", "2", "Hope you're doing well.", new double[] {-0.016385807, 0.16914073, -0.062722616, 0.009645036, 0.06367876, -0.10871283, -0.034588274, 0.0038275379, 0.107661076, 0.069654614, 0.036404934, -0.037910853, 0.08872956, 0.14571536, -0.082227826, 0.034731694, -0.07405285, 0.050340638, -0.16569862, -0.20671692, -0.0834708, 0.0043175584, -0.017234378, 0.052683175, 0.013863994, 0.12142946, -0.002630872, 0.069702424, 0.095804974, -0.10020321, 0.0793116, 0.028970966, 0.13940485, 0.15814514, 0.11597948, -0.03795866, -0.018178564, 0.14753202, -0.10670494, -0.055455975, 0.058037546, -0.04457991, -0.0046014115, -0.018704439, 0.07902476, -0.07902476, 0.031026661, -0.017855868, 0.098769, 0.118847884, -0.051488005, 0.009017572, 0.15068726, 0.044962365, 0.18233542, -0.0006853563, 0.11205931, -0.2602606, 0.09848216, 0.08681728, -0.077638365, -0.008664995, 0.010314333, -0.0361659, 0.047185384, 0.09202823, 0.004359389, -0.008545479, -0.08815587, 0.0765388, 0.012262463, 0.0542608, -0.10813915, 0.11722245, 0.013744476, -0.08265808, 0.008055458, 0.12085578, 0.056364305, -0.12907855, -0.05311344, -0.060666922, 0.117031224, 0.029233903, -0.148775, 0.017879771, -0.081558526, -0.26332027, -0.22029407, -0.07410065, 0.0059340284, -0.11234615, -0.06898532, 0.046516087, 0.06798138, -0.049193274, -0.04687464, -0.049002044, -0.03145692, 0.0065614935, -0.021274058, -0.060188852, -0.04584679, -0.015979448, 0.08949447, -0.050197218, -0.051440194, -0.1341461, 0.08557431, -0.08261028, -0.104314595, -0.016134819, 0.057320442, -0.022421423, 0.012501498, 0.055503782, 0.020568907, -0.095183484, 0.0049450235, -0.03525757, 0.17688543, -0.06888971, -0.005694994, 0.05622088, -0.04250031, 0.050053798, 0.16063109, 0.06353533}));
docs.add(new Document("item4", "1", "Excuse me please.", new double[] {-0.19188246, 0.018719073, -0.032395326, 0.09734556, 0.021671357, -0.11751684, -0.078514785, 0.16507255, -0.0012956136, 0.117006175, -0.065492816, 0.106282204, -0.009750514, -0.006008296, 0.021799022, 0.04643862, -0.046023704, -0.023442727, 0.12868765, -0.1466886, -0.085089594, -0.046885453, -0.0067742937, 0.048162118, -0.04739612, 0.021687314, -0.025581138, 0.04841745, -0.10519704, -0.039129723, -0.09747323, 0.10532471, 0.04375763, 0.09536674, 0.0145938555, -0.0060681393, 0.26171595, 0.1815415, -0.03833181, 0.012487361, -0.027192924, -0.12281499, 0.017937116, -0.02173519, 0.07308897, -0.06913131, 0.07417413, -0.01884674, 0.049023863, -0.049949445, 0.081068106, 0.22060739, -0.031645287, -0.024735348, -0.041108552, 0.1823075, -0.06230116, -0.119048834, -0.07813178, -0.0841321, -0.007711843, 0.039576557, -0.07589762, 0.028198296, 0.003087929, 0.047970615, 0.0845151, 0.08208944, 0.07423796, 0.01259907, 0.00046179298, 0.024671515, 0.10302671, 0.12160216, 0.1353263, -0.16251922, 0.069195144, -0.09160058, 0.033320908, -0.06341824, -0.06402466, -0.048864283, -0.10053722, -0.019341446, 0.027033342, -0.19354212, -0.011146865, -0.31329313, 0.054513514, -0.0098861605, 0.10277138, 0.059237167, 0.021495815, -0.0704718, 0.14285861, 0.042672466, 0.057769008, 0.054353934, -0.041363884, 0.07819562, 0.1085802, -0.0047874865, 0.0035626881, 0.025405597, 0.0032953867, 0.13430496, -0.084451266, -0.10883553, 0.115601845, -0.072259136, -0.06976964, -0.1081972, 0.08515343, 0.044715125, 0.05725834, -0.06759931, -0.0421618, -0.06185433, -0.068939805, -0.13673063, -0.032874074, -0.121538326, -0.010157451, -0.048608948, 0.049949445, 0.031310163, 0.13238996, 0.06855681}));
docs.add(new Document("item5", "2", "Is this the right place?", new double[] {-0.05695127, 0.07729321, -0.07253956, 0.054049686, -0.084886715, -0.1677979, -0.020681491, -0.010765179, -0.05312365, 0.10964277, -0.1724898, -0.0139754405, -0.019446775, -0.009877727, 0.10902541, 0.06599557, -0.20224646, -0.008658445, -0.11698933, -0.00034678154, 0.059760246, 0.023660243, 0.014523345, 0.058340326, -0.116927594, -0.0011546522, 0.035991967, 0.017857078, -0.21261807, -0.07568809, -0.007250097, 0.09525833, 0.073033445, -0.078157514, -0.14816591, -0.089578636, -0.006030815, 0.08519539, 0.059852853, 0.12328638, 0.08544234, -0.017656436, -0.03901702, 0.036238912, -0.09482618, 0.007215371, 0.15742627, 0.014183799, 0.107914165, 0.014245534, -0.011907292, 0.025188204, 0.057630364, -0.057321683, 0.0024366346, 0.034695517, 0.11766842, -0.16520499, 0.065193, 0.10822285, -0.06834152, -0.048925616, -0.078836605, 0.05161112, 0.07235435, 0.07636718, -0.075996764, 0.13902901, 0.023860885, 0.07846619, 0.02665443, -0.026870504, -0.0084115015, 0.07550287, 0.07500899, -0.07395948, 0.05062335, 0.05621044, -0.031531557, -0.001280053, 0.06908235, 0.078280985, -0.060068928, 0.14236274, 0.14236274, 0.06358787, -0.042474225, -0.38053942, 0.062785305, 0.050870296, 0.038677476, -0.078157514, 0.029309068, -0.07809578, 0.07439163, -0.06772417, 0.11896487, 0.073589064, 0.05238282, 0.018196626, 0.116371974, 0.0033067234, -0.020264775, -0.006304768, -0.10686466, 0.08408415, -0.04386328, -0.0068681072, 0.08994905, -0.059513304, 0.021823604, 0.049419504, -0.012817894, -0.093221046, 0.02802805, 0.089578636, 0.07124311, -0.26694557, 0.024833223, -0.03802925, -0.18483697, -0.1103836, 0.11877967, 0.07000839, -0.018860284, -0.044974525, -0.034016423, 0.022780508}));
docs.add(new Document("item6", "1", "Awful weather today.", new double[] {0.002305239, -0.02629875, 0.009952777, 0.026884208, -0.0067561795, -0.1118458, 0.07432968, 0.0999493, 0.02177902, -0.047726493, 0.042059265, 0.1583077, 0.11175212, 0.07685886, -0.14060347, 0.005084698, -0.19277944, -0.05606341, 0.082619764, -0.04241054, 0.09620237, 0.011832096, -0.023301208, 0.28214368, 0.02451896, 0.02793803, 0.03358184, 0.044424515, 0.11006601, 0.038101573, 0.0077982936, 0.045572013, 0.075922124, 0.016369391, -0.0039986745, 0.22781321, 0.062854715, 0.048054352, 0.010251361, 0.07170683, -0.018816603, 0.027329156, -0.08482108, -0.079481706, 0.042785235, 0.024120849, 0.17413847, 0.035431888, 0.12439801, 0.10641275, -0.14828467, -0.024425287, 0.083415985, 0.1184966, 0.0026799317, 0.15399873, -0.010304051, 0.009742012, -0.10781785, -0.019472316, 0.061777476, -0.09798217, 0.028804509, -0.023371464, 0.015491205, 0.07521958, 0.024003757, 0.013465522, -0.089692086, -0.097794816, 0.021193562, -0.0592483, -0.056438103, -0.10987866, 0.11802823, -0.06440032, 0.07704621, -0.040138967, -0.13891736, -0.16027485, 0.08631986, -0.16786237, -0.085757814, 0.015491205, -0.013243048, 0.09133137, -0.16196096, -0.2313728, -0.083134964, 0.12308659, 0.07559427, 0.09723278, 0.048663225, -0.13339064, 0.016345974, -0.13189186, 0.11025336, 0.03346475, -0.00688498, -0.059435643, 0.0056877197, 0.014999421, 0.063650936, -0.15053283, -0.017481761, 0.05194179, 0.061402783, 0.0077924393, 0.19971126, -0.07713988, 0.06425981, 0.0021369199, 0.12158781, -0.024331613, -0.008571098, 0.03264511, -0.0020242194, -0.05507984, -0.00805004, -0.03304322, 0.050630365, -0.1475353, -0.003735219, -0.0202217, 0.16271034, -0.059435643, 0.06023187, 0.06660164}));
docs.add(new Document("item7", "2", "Dinosaurs were huge.", new double[] {0.09842033, -0.025395654, -0.03831241, 0.037825905, 0.17008278, -0.022269849, -0.05935383, 0.032668933, -0.022148222, 0.08178179, -0.062710725, 0.164342, -0.024021273, -0.052640036, 0.027366007, 0.18098053, -0.18487258, -0.008903074, 0.16346629, 0.009407825, 0.110339746, -0.046315446, 0.08046822, 0.12512955, -0.06635953, 0.0070239417, 0.055753678, -0.0005336371, -0.012326866, 0.1578228, -0.023668556, -0.035904203, 0.05988899, -0.08032227, -0.11520481, 0.08499274, 0.05093726, -0.09248494, -0.011128843, -0.07064079, 0.01466818, 0.082073696, -0.040574666, -0.07292737, 0.03539337, 0.05025615, -0.1145237, 0.09584184, 0.04186391, -0.034833886, -0.18234275, -0.090441614, -0.09194978, -0.031331036, -0.13106494, 0.068208255, 0.03220675, -0.031233737, -0.04337208, 0.1289243, 0.1363192, 0.052494083, 0.03074723, -0.000113359885, 0.07681943, 0.03962598, 0.016529068, -0.04191256, -0.03612313, 0.023084749, 0.10917213, 0.09477153, -0.09652295, -0.0999285, 0.11685894, -0.012649177, 0.043688312, -0.10333405, -0.060521446, -0.042034186, 0.0483831, 0.0028141378, -0.17270991, 0.05424551, 0.25570798, 0.09511208, -0.08504139, -0.1570444, -0.084262975, -0.13291366, -0.023741532, -0.14857918, 0.09190113, -0.08041958, -0.019837314, 0.09569589, -0.053029243, -0.030722905, -0.05239678, -0.15091442, -0.05872137, -0.056045584, 0.11831845, -0.1145237, -0.08761988, -0.0035727844, -0.05570503, 0.19285129, 0.011761302, 0.087717175, 0.107712604, -0.11277228, -0.042423394, 0.13048112, -0.03356897, 0.056775343, 0.08640361, -0.11831845, -0.10868562, 0.0410855, 0.12036178, -0.09477153, -0.017611546, 0.0075043673, -0.12668636, -0.006391483, 0.0012185475, -0.05161837}));
docs.add(new Document("item8", "1", "The hero saves the day again.", new double[] {0.11208976, 0.058739875, -0.017022463, 0.15080968, -0.0031057745, -0.048069898, -0.059069872, 0.09861479, 0.11626975, 0.12935972, 0.008882481, 0.08766981, -0.00940498, -0.077164836, 0.0015795279, 0.06297486, -0.06313986, 0.0931698, -0.17247963, -0.12077974, -0.044797402, -0.1404697, -0.050077394, 0.020432455, 0.07897983, 0.022632452, -0.046914898, 0.0031212433, -0.059399873, 0.03321993, -0.07237984, 0.10119978, 0.19612958, -0.086349815, 0.038252417, 0.084094815, 0.16257966, 0.15520966, 0.063304864, -0.08761481, 0.06132487, -0.08486482, -0.09789979, 0.06935485, -0.04160741, 0.05585238, -0.13485971, 0.105544776, -0.08084983, 0.103344776, 0.053817384, -0.0923998, -0.052607387, 0.015537467, 0.025299946, -0.05961987, 0.08090483, -0.25453946, 0.01894746, -0.026344944, -0.14552969, 0.014341219, 0.14409968, 0.12264974, -0.0456224, -0.13529971, -0.0466674, 0.006166862, 0.02776119, -0.020184956, 0.093939796, 0.054779883, -0.09635979, -0.016334964, -0.029177437, -0.06863985, 0.19139959, -0.08794481, -0.08464482, -0.013413096, 0.022109952, -0.122099735, -0.0458149, -0.019676207, 0.10785477, -0.109119765, -0.09690979, -0.28423938, 0.050709892, 0.12803972, 0.10620477, 0.12110974, -0.006658423, -0.052304886, -0.07798983, -0.035667423, -0.07507484, 0.02745869, 0.07237984, -0.0230862, -0.03684992, -0.067539856, -0.052387387, -0.05202989, 0.14244969, 0.080684826, -0.038472418, 0.112639755, 0.03242243, -0.07501984, 0.10631477, -0.024076197, -0.07754983, 0.06610986, -0.12671973, -0.044082403, 0.006001862, 0.037454918, 0.054504883, -0.03679492, 0.076669835, 0.02271495, 0.14794968, 0.06440486, -0.006850923, -0.06984985, 0.035639923, -0.009143731}));
docs.add(new Document("item9", "2", "Don't worry about it.", new double[] {-0.051598575, 0.22204931, -0.017881807, 0.11678282, 0.18426134, -0.03713568, -0.016847137, 0.06549915, 0.057626653, 0.032569632, 0.00076827104, -0.04489571, -0.07530603, 0.10778569, 0.030477798, 0.050338972, -0.21053298, -0.04341118, -0.097708896, -0.13432723, 0.1438642, 0.059606023, -0.12299085, -0.036820777, -0.026699001, 0.18381149, -0.02861089, 0.08259371, -0.14962237, -0.07373153, 0.02321261, 0.085607745, -0.13810603, 0.065139264, -0.12685962, 0.14098512, 0.17112552, -0.035921063, -0.008536032, -0.09680918, 0.04435588, -0.16086878, -0.035718627, 0.09689915, -0.0007956842, 0.034256592, -0.00234769, 0.04577293, 0.06725359, 0.015958669, -0.06486935, 0.124160476, 0.09887852, -0.050518915, -0.07080746, -0.078859895, 0.17013584, -0.22078972, -0.10103783, 0.06873812, 0.0370682, 0.04563797, -0.060235824, -0.056816913, -0.064689405, 0.11273411, 0.16572724, 0.108415484, 0.07921978, 0.05569227, -0.11210431, -0.05848138, -0.008361713, 0.07458626, -0.08992637, -0.07557594, -0.0020102975, -0.07080746, -0.0092614265, 0.06626391, -0.05848138, -0.078545, 0.08362838, -0.031737395, -0.047549862, -0.15367107, 0.093930095, -0.087182246, 0.06401462, 0.09006133, 0.10886534, 0.013338254, 0.025551865, -0.027553728, 0.14206477, -0.09060115, -0.07818511, -0.001209693, 0.017893054, -0.069367915, 0.0709874, 0.050249003, -0.13396735, -0.056681953, -0.022166694, -0.02170559, 0.08277365, -0.101667635, 0.09096104, 0.049529232, -0.095819496, -0.08974643, 0.054477658, -0.037967913, -0.08682236, 0.077690266, 0.03828281, -0.04136433, -0.14431405, -0.060505737, 0.025619343, -0.019400073, 0.11075474, 0.066893704, 0.07494614, 0.03684327, 0.03929499, -0.017353225}));
return docs;
}
public String getQueryVector() {
return "0.13481648, 0.022579897, -0.038054377, 0.035029914, 0.15404047, -0.012947189, 0.013434003, 0.0328755,0.0859279, 0.090071, 0.07391291, 0.10896354, 0.04085097, 0.019876525, 0.013806882, 0.03799223,-0.097528584, -0.10324606, -0.07863604, -0.01846787, -0.0018139011, 0.09686569, 0.0065512774,0.014107257, 0.0004389097, 0.07432722, 0.012698603, 0.09404838, 0.11592395, -0.08290344, -0.023802113,0.0771031, 0.15578057, 0.17152436, 0.06732538, 0.04408259, 0.04163816, 0.03196402, 0.08451925,0.05365315, -0.10473758, 0.054730356, -0.0686926, 0.12412729, 0.1910798, -0.048971448, -0.007649199,-0.059122045, -0.005241022, 0.021440545, -0.1014231, 0.08282058, -0.057671957, -0.024755025, -0.07619162,0.0966171, 0.047272775, -0.20384054, -0.024319999, -0.014739079, 0.03681145, -0.03331053, 0.09172824,-0.09744572, 0.08584504, 0.13440217, -0.019234344, 0.19605151, 0.030389642, -0.0646738, 0.08447782,-0.107969195, -0.19058262, -0.05369458, 0.071675636, -0.095871344, 0.030141056, 0.025107188, 0.06309942,-0.09951727, 0.031446133, -0.07888463, -0.08182623, -0.063762315, -0.0955399, -0.03948375, -0.031073254,-0.29979473, -0.06711823, 0.07743455, 0.0074161496, 0.01248109, -0.0812462, -0.0676154, 3.0668652e-05,-0.17931339, 0.2008575, 0.026950868, 0.11111795, 0.07507298, -0.00898017, 0.038800135, -0.015267325,-0.13788238, -0.010740988, -0.00870569, -0.037743647, -0.12445874, 0.014676933, -0.27344462, -0.05738194,0.07101274, -0.023615673, 0.0040369336, -0.039028008, 0.06546099, -0.072421394, -0.17119291,-0.019358637, -0.05489608, 0.006199114, 0.0515816, 0.050048653, -0.034843475, 0.07847032, -0.13315925,0.025335059, -0.0432954";
}
static class Document {
@JsonProperty("id")
String id;
@JsonProperty("pk")
String pk;
@JsonProperty("text")
String text;
@JsonProperty("embedding")
double[] embedding;
@JsonProperty("score")
double score;
public Document(String id, String pk, String text, double[] embedding) {
this.id = id;
this.pk = pk;
this.text = text;
this.embedding = embedding;
}
public Document() {}
public String getId() { return id; }
public double getScore() {
return score;
}
}
} | class NonStreamingOrderByQueryVectorSearchTest {
protected static final int TIMEOUT = 30000;
protected static final int SETUP_TIMEOUT = 20000;
protected static final int SHUTDOWN_TIMEOUT = 20000;
protected static Logger logger = LoggerFactory.getLogger(NonStreamingOrderByQueryVectorSearchTest.class.getSimpleName());
private final String databaseId = CosmosDatabaseForTest.generateId();
private final String flatContainerId = "flat_" + UUID.randomUUID();
private final String quantizedContainerId = "quantized_" + UUID.randomUUID();
private final String largeDataContainerId = "large_data_" + UUID.randomUUID();
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer flatIndexContainer;
private CosmosAsyncContainer quantizedIndexContainer;
private CosmosAsyncContainer largeDataContainer;
@BeforeClass(groups = {"query", "split"}, timeOut = SETUP_TIMEOUT)
@AfterClass(groups = {"query"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeDeleteDatabase(database);
safeClose(client);
}
@Test(groups = {"query"}, timeOut = TIMEOUT)
public void flatIndexVectorSearch() {
String queryVector = getQueryVector();
String vanilla_query = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
List<Document> resultDocs = flatIndexContainer.queryItems(vanilla_query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
String euclideanSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'euclidean'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'})", queryVector, queryVector);
resultDocs = flatIndexContainer.queryItems(euclideanSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
String dotproductSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'dotproduct'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'dotproduct'})", queryVector, queryVector);
resultDocs = flatIndexContainer.queryItems(dotproductSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
}
@Test(groups = {"query"}, timeOut = TIMEOUT)
public void quantizedIndexVectorSearch() {
String queryVector = getQueryVector();
String vanillaQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
List<Document> resultDocs = quantizedIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
String euclideanSpecsQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'euclidean'})", queryVector, queryVector);
resultDocs = quantizedIndexContainer.queryItems(euclideanSpecsQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, true);
String dotproduct_specs_query = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s], {'distanceFunction': 'dotproduct'}) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s], false, {'distanceFunction': 'dotproduct'})", queryVector, queryVector);
resultDocs = quantizedIndexContainer.queryItems(dotproduct_specs_query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
}
@Test(groups = {"query"}, timeOut = TIMEOUT * 40)
public void largeDataVectorSearch() {
double embeddingValue = 0.0001;
for (int i = 1; i <= 2000; i++) {
Document doc = new Document(String.valueOf(i), String.valueOf(i % 2), "text" + i, new double[]{embeddingValue, embeddingValue});
largeDataContainer.createItem(doc).block();
embeddingValue = 0.0001 * (i + 1);
}
String query = "SELECT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 0 LIMIT 1000";
List<Document> resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(1000, resultDocs, false);
query = "SELECT DISTINCT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 0 LIMIT 1000";
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(1000, resultDocs, false);
query = "SELECT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 1000 LIMIT 500";
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(500, resultDocs, false);
query = "SELECT DISTINCT c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001]) OFFSET 1000 LIMIT 500";
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(500, resultDocs, false);
query = "SELECT TOP 1000 c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001])";
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(1000, resultDocs, false);
query = "SELECT DISTINCT TOP 1000 c.id, VectorDistance(c.embedding, [0.0001, 0.0001]) AS score FROM c ORDER BY" +
" VectorDistance(c.embedding, [0.0001, 0.0001])";
resultDocs = largeDataContainer.queryItems(query, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(1000, resultDocs, false);
}
@Test(groups = {"split"}, timeOut = TIMEOUT * 40)
public void splitHandlingVectorSearch() throws Exception {
AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client);
List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(flatContainerId, asyncDocumentClient);
String queryVector = getQueryVector();
String vanillaQuery = String.format("SELECT DISTINCT TOP 6 c.text, VectorDistance(c.embedding, [%s]) AS " +
"score FROM c ORDER BY VectorDistance(c.embedding, [%s])", queryVector, queryVector);
List<Document> resultDocs = flatIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
logger.info("Scaling up throughput for split");
ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
ThroughputResponse throughputResponse = flatIndexContainer.replaceThroughput(throughputProperties).block();
logger.info("Throughput replace request submitted for {} ",
throughputResponse.getProperties().getManualThroughput());
throughputResponse = flatIndexContainer.readThroughput().block();
while (true) {
assert throughputResponse != null;
if (!throughputResponse.isReplacePending()) {
break;
}
logger.info("Waiting for split to complete");
Thread.sleep(10 * 1000);
throughputResponse = flatIndexContainer.readThroughput().block();
}
List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(flatContainerId, asyncDocumentClient);
assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size())
.as("Partition ranges should increase after split");
logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());
resultDocs = flatIndexContainer.queryItems(vanillaQuery, new CosmosQueryRequestOptions(), Document.class).byPage()
.flatMap(feedResponse -> Flux.fromIterable(feedResponse.getResults()))
.collectList().block();
validateOrdering(6, resultDocs, false);
}
private List<PartitionKeyRange> getPartitionKeyRanges(
String containerId, AsyncDocumentClient asyncDocumentClient) {
List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
.readPartitionKeyRanges("/dbs/" + database.getId()
+ "/colls/" + containerId,
new CosmosQueryRequestOptions())
.collectList().block();
partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
return partitionKeyRanges;
}
private void validateOrdering(int top, List<Document> docs, boolean isEucledian) {
assertThat(docs.size()).isEqualTo(top);
if (isEucledian) {
for (int i = 0; i < docs.size() - 1; i++) {
assertThat(docs.get(i).getScore()).isLessThanOrEqualTo(docs.get(i + 1).getScore());
}
} else {
for (int i = 0; i < docs.size() - 1; i++) {
assertThat(docs.get(i).getScore()).isGreaterThanOrEqualTo(docs.get(i + 1).getScore());
}
}
}
private CosmosVectorEmbeddingPolicy populateVectorEmbeddingPolicy(Long dimensions) {
CosmosVectorEmbeddingPolicy policy = new CosmosVectorEmbeddingPolicy();
CosmosVectorEmbedding embedding = new CosmosVectorEmbedding();
embedding.setPath("/embedding");
embedding.setDataType(CosmosVectorDataType.FLOAT32);
embedding.setDimensions(dimensions);
embedding.setDistanceFunction(CosmosVectorDistanceFunction.COSINE);
policy.setCosmosVectorEmbeddings(Collections.singletonList(embedding));
return policy;
}
private IndexingPolicy populateIndexingPolicy(CosmosVectorIndexType vectorIndexType) {
IndexingPolicy indexingPolicy = new IndexingPolicy();
indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT);
IncludedPath includedPath1 = new IncludedPath("/*");
indexingPolicy.setIncludedPaths(Collections.singletonList(includedPath1));
CosmosVectorIndexSpec cosmosVectorIndexSpec = new CosmosVectorIndexSpec();
cosmosVectorIndexSpec.setPath("/embedding");
cosmosVectorIndexSpec.setType(vectorIndexType.toString());
indexingPolicy.setVectorIndexes(Collections.singletonList(cosmosVectorIndexSpec));
return indexingPolicy;
}
public List<Document> getVectorDocs() {
List<Document> docs = new ArrayList<>();
docs.add(new Document("item0", "1", "Good morning!", new double[]{-0.008334724, -0.05993167, -0.0903545, -0.04791922, -0.01825805, -0.053011455, 0.120733805, 0.017714009, 0.07346743, 0.11559805, 0.03262076, 0.074512, 0.015864266, 0.01981401, 0.007850527, 0.076296456, -0.08539284, 0.016593281, -0.05423011, 0.07520837, 0.074250855, 0.056754466, -0.022098986, 0.03155444, 0.04334927, 0.024655985, -0.02109795, 0.044023883, -0.027550288, -0.11350893, -0.022806242, 0.08608921, 0.009221513, 0.06659074, 0.09087678, 0.024830079, 0.0075513036, 0.036472578, 0.015418151, 0.060497474, 0.010940685, -0.059365865, 0.043566886, 0.00427073, -0.023546139, 0.030357545, -0.03403527, 0.1182965, 0.1115939, -0.018954424, 0.0032452107, 0.10297628, 0.15328929, -0.016952349, -0.04530782, 0.04674409, -8.351895e-05, -0.19376601, -0.025091218, -0.03664667, -0.011860116, -0.10454312, -0.13109237, -0.049268447, 0.17557324, 0.044872586, 0.046787616, 0.15337633, -0.019509347, 0.0077743605, 0.04556896, -0.08413066, -0.028681897, 0.1209079, 0.1357929, -0.09314, 0.12534729, -0.065546185, 0.12212656, 0.04892026, 0.07394619, -0.08134516, -0.004493787, 0.08138869, 0.028573086, 0.12290998, -0.16477945, -0.29839617, -0.08090993, 0.12256179, 0.16591106, -0.08173688, -0.034383457, -0.1076768, -0.043022845, -0.07655759, 0.2021225, 0.03923631, 0.07703635, -0.08587159, 0.06498038, -0.08330371, 0.16486649, -0.14040637, 0.02070624, -0.069855, 0.052880887, 0.016136287, 0.00024294876, -0.19968519, 0.06933272, 0.013241983, 0.0004002109, 0.14998151, 0.07516485, 0.18610589, -0.07895138, -0.108982496, -0.03494926, -0.027637335, -0.032925423, -0.009509855, 0.1182965, -0.075513035, -0.08665501, 0.019629037, 0.2583547, 0.00983084}));
docs.add(new Document("item1", "2", "Good afternoon!", new double[]{-0.04136167, -0.047741424, -0.08737179, -3.0502639e-05, 0.054595508, -0.11298566, 0.0906921, 0.108052626, 0.04729081, 0.21287979, -0.06588459, -0.052793052, -0.05568647, 0.017384235, -0.02518698, 0.021665072, -0.19238868, 0.03379609, 0.0075359354, -0.04989963, 0.055449303, 0.03282371, 0.026064493, 0.042096883, -0.007192045, 0.0786441, -0.09012291, 0.094012424, -0.0031483725, -0.0318039, -0.034721036, 0.10255038, 0.08851018, 0.11327027, 0.031614166, 0.006035863, 0.021321183, 0.0703433, 0.14201473, 0.058674756, -0.007986549, 0.03329804, 0.08884221, -0.09505595, -0.015522485, -0.068161376, 0.072572656, 0.049235567, 0.2263508, 0.029574543, -0.050563693, 0.050516263, 0.04660303, 0.08376687, 0.017514676, 0.0706279, 0.007921329, -0.1353741, -0.054358345, -0.1628853, -0.097617336, -0.123326086, -0.0489984, -0.0693472, 0.15396787, 0.027368903, 0.06042978, 0.22862759, -0.016293272, 0.033582643, 0.06697555, -0.10302471, -0.12104929, 0.18034068, 0.068303674, 0.003468546, 0.07480201, -0.1204801, 0.06787678, -0.042926963, 0.15785739, -0.034531303, -0.028934196, 0.022933908, -0.012012435, 0.004248228, -0.19172463, -0.31837103, -0.099230066, 0.02071641, 0.18546346, 0.06299117, -0.047053643, -0.09206767, 0.049472734, -0.046365865, 0.18214314, 0.0051079527, 0.105680965, -0.026443958, 0.072714955, -0.08073115, 0.09429702, -0.113744594, 0.02092986, -0.013625161, -0.07390078, 0.020550394, 0.011787128, -0.13499463, 0.015107445, -0.0015223064, 0.040104695, 0.094961084, 0.13404597, 0.082486175, -0.07428025, -0.026847139, -0.025163265, -0.09818654, 0.010043961, 0.013601444, 0.077932596, 0.016056107, -0.026965722, 0.045606934, 0.085047565, -0.005878741}));
docs.add(new Document("item2", "1", "Great weather today!", new double[]{0.07557184, -0.047335204, -0.054325826, 0.011354052, 0.07698824, -0.025358144, -0.07927276, 0.1510523, 0.022251202, 0.026249107, -0.021725763, 0.062047496, 0.14556946, -0.0006071819, -0.107098185, 0.021394506, -0.13771072, -0.10527057, 0.115048304, -0.048842985, -0.059443146, 0.060585406, -0.014940745, 0.18075103, 0.053777542, -0.022057017, 0.08493836, 0.1198001, 0.041555375, -0.066525154, 0.054280136, 0.107646465, 0.023359193, -0.0046632714, -0.1017981, 0.04105278, 0.11376897, 0.016437104, 0.067393266, 0.0961325, -0.017727856, -0.03333111, -0.020492123, -0.041943744, 0.0031040881, -0.028053876, 0.053640474, -0.02215982, 0.08205987, 0.015100661, -0.17133881, -0.05843796, 0.0895074, -0.04064157, -0.10225501, 0.11367759, -0.03593546, -0.010999952, -0.14273666, -0.010383132, 0.026112035, -0.14200561, -0.0634639, -0.04029889, 0.07383561, 0.116784535, 0.0805064, 0.13241065, -0.100884296, -0.10216363, -0.030384084, -0.07657703, -0.06323545, -0.013101708, 0.15233164, -0.009880538, 0.16448526, -0.11148446, -0.05606206, -0.11587073, 0.084892675, -0.1397211, -0.04948265, 0.006470896, 0.015637523, 0.09051259, -0.12665366, -0.28583884, -0.16046451, 0.055833608, 0.100244634, 0.013535767, -0.07155109, -0.10051877, 0.08662891, -0.0729218, 0.15818, 0.025449526, 0.05053353, -0.015740326, -0.0767141, -0.056381892, 0.091517776, -0.075114936, -0.04621579, -0.040458806, 0.03947646, 0.10901718, 0.0076417113, -0.34487078, 0.024375802, -0.072419204, 0.053000808, 0.04057303, -0.08434439, 0.027185759, 0.03403931, -0.06332683, 0.051858548, 0.011993717, 0.10728095, -0.09887392, 0.07593736, -0.0025172527, 0.1089258, -0.048934367, 0.051264573, 0.017008234}));
docs.add(new Document("item3", "2", "Hope you're doing well.", new double[]{-0.016385807, 0.16914073, -0.062722616, 0.009645036, 0.06367876, -0.10871283, -0.034588274, 0.0038275379, 0.107661076, 0.069654614, 0.036404934, -0.037910853, 0.08872956, 0.14571536, -0.082227826, 0.034731694, -0.07405285, 0.050340638, -0.16569862, -0.20671692, -0.0834708, 0.0043175584, -0.017234378, 0.052683175, 0.013863994, 0.12142946, -0.002630872, 0.069702424, 0.095804974, -0.10020321, 0.0793116, 0.028970966, 0.13940485, 0.15814514, 0.11597948, -0.03795866, -0.018178564, 0.14753202, -0.10670494, -0.055455975, 0.058037546, -0.04457991, -0.0046014115, -0.018704439, 0.07902476, -0.07902476, 0.031026661, -0.017855868, 0.098769, 0.118847884, -0.051488005, 0.009017572, 0.15068726, 0.044962365, 0.18233542, -0.0006853563, 0.11205931, -0.2602606, 0.09848216, 0.08681728, -0.077638365, -0.008664995, 0.010314333, -0.0361659, 0.047185384, 0.09202823, 0.004359389, -0.008545479, -0.08815587, 0.0765388, 0.012262463, 0.0542608, -0.10813915, 0.11722245, 0.013744476, -0.08265808, 0.008055458, 0.12085578, 0.056364305, -0.12907855, -0.05311344, -0.060666922, 0.117031224, 0.029233903, -0.148775, 0.017879771, -0.081558526, -0.26332027, -0.22029407, -0.07410065, 0.0059340284, -0.11234615, -0.06898532, 0.046516087, 0.06798138, -0.049193274, -0.04687464, -0.049002044, -0.03145692, 0.0065614935, -0.021274058, -0.060188852, -0.04584679, -0.015979448, 0.08949447, -0.050197218, -0.051440194, -0.1341461, 0.08557431, -0.08261028, -0.104314595, -0.016134819, 0.057320442, -0.022421423, 0.012501498, 0.055503782, 0.020568907, -0.095183484, 0.0049450235, -0.03525757, 0.17688543, -0.06888971, -0.005694994, 0.05622088, -0.04250031, 0.050053798, 0.16063109, 0.06353533}));
docs.add(new Document("item4", "1", "Excuse me please.", new double[]{-0.19188246, 0.018719073, -0.032395326, 0.09734556, 0.021671357, -0.11751684, -0.078514785, 0.16507255, -0.0012956136, 0.117006175, -0.065492816, 0.106282204, -0.009750514, -0.006008296, 0.021799022, 0.04643862, -0.046023704, -0.023442727, 0.12868765, -0.1466886, -0.085089594, -0.046885453, -0.0067742937, 0.048162118, -0.04739612, 0.021687314, -0.025581138, 0.04841745, -0.10519704, -0.039129723, -0.09747323, 0.10532471, 0.04375763, 0.09536674, 0.0145938555, -0.0060681393, 0.26171595, 0.1815415, -0.03833181, 0.012487361, -0.027192924, -0.12281499, 0.017937116, -0.02173519, 0.07308897, -0.06913131, 0.07417413, -0.01884674, 0.049023863, -0.049949445, 0.081068106, 0.22060739, -0.031645287, -0.024735348, -0.041108552, 0.1823075, -0.06230116, -0.119048834, -0.07813178, -0.0841321, -0.007711843, 0.039576557, -0.07589762, 0.028198296, 0.003087929, 0.047970615, 0.0845151, 0.08208944, 0.07423796, 0.01259907, 0.00046179298, 0.024671515, 0.10302671, 0.12160216, 0.1353263, -0.16251922, 0.069195144, -0.09160058, 0.033320908, -0.06341824, -0.06402466, -0.048864283, -0.10053722, -0.019341446, 0.027033342, -0.19354212, -0.011146865, -0.31329313, 0.054513514, -0.0098861605, 0.10277138, 0.059237167, 0.021495815, -0.0704718, 0.14285861, 0.042672466, 0.057769008, 0.054353934, -0.041363884, 0.07819562, 0.1085802, -0.0047874865, 0.0035626881, 0.025405597, 0.0032953867, 0.13430496, -0.084451266, -0.10883553, 0.115601845, -0.072259136, -0.06976964, -0.1081972, 0.08515343, 0.044715125, 0.05725834, -0.06759931, -0.0421618, -0.06185433, -0.068939805, -0.13673063, -0.032874074, -0.121538326, -0.010157451, -0.048608948, 0.049949445, 0.031310163, 0.13238996, 0.06855681}));
docs.add(new Document("item5", "2", "Is this the right place?", new double[]{-0.05695127, 0.07729321, -0.07253956, 0.054049686, -0.084886715, -0.1677979, -0.020681491, -0.010765179, -0.05312365, 0.10964277, -0.1724898, -0.0139754405, -0.019446775, -0.009877727, 0.10902541, 0.06599557, -0.20224646, -0.008658445, -0.11698933, -0.00034678154, 0.059760246, 0.023660243, 0.014523345, 0.058340326, -0.116927594, -0.0011546522, 0.035991967, 0.017857078, -0.21261807, -0.07568809, -0.007250097, 0.09525833, 0.073033445, -0.078157514, -0.14816591, -0.089578636, -0.006030815, 0.08519539, 0.059852853, 0.12328638, 0.08544234, -0.017656436, -0.03901702, 0.036238912, -0.09482618, 0.007215371, 0.15742627, 0.014183799, 0.107914165, 0.014245534, -0.011907292, 0.025188204, 0.057630364, -0.057321683, 0.0024366346, 0.034695517, 0.11766842, -0.16520499, 0.065193, 0.10822285, -0.06834152, -0.048925616, -0.078836605, 0.05161112, 0.07235435, 0.07636718, -0.075996764, 0.13902901, 0.023860885, 0.07846619, 0.02665443, -0.026870504, -0.0084115015, 0.07550287, 0.07500899, -0.07395948, 0.05062335, 0.05621044, -0.031531557, -0.001280053, 0.06908235, 0.078280985, -0.060068928, 0.14236274, 0.14236274, 0.06358787, -0.042474225, -0.38053942, 0.062785305, 0.050870296, 0.038677476, -0.078157514, 0.029309068, -0.07809578, 0.07439163, -0.06772417, 0.11896487, 0.073589064, 0.05238282, 0.018196626, 0.116371974, 0.0033067234, -0.020264775, -0.006304768, -0.10686466, 0.08408415, -0.04386328, -0.0068681072, 0.08994905, -0.059513304, 0.021823604, 0.049419504, -0.012817894, -0.093221046, 0.02802805, 0.089578636, 0.07124311, -0.26694557, 0.024833223, -0.03802925, -0.18483697, -0.1103836, 0.11877967, 0.07000839, -0.018860284, -0.044974525, -0.034016423, 0.022780508}));
docs.add(new Document("item6", "1", "Awful weather today.", new double[]{0.002305239, -0.02629875, 0.009952777, 0.026884208, -0.0067561795, -0.1118458, 0.07432968, 0.0999493, 0.02177902, -0.047726493, 0.042059265, 0.1583077, 0.11175212, 0.07685886, -0.14060347, 0.005084698, -0.19277944, -0.05606341, 0.082619764, -0.04241054, 0.09620237, 0.011832096, -0.023301208, 0.28214368, 0.02451896, 0.02793803, 0.03358184, 0.044424515, 0.11006601, 0.038101573, 0.0077982936, 0.045572013, 0.075922124, 0.016369391, -0.0039986745, 0.22781321, 0.062854715, 0.048054352, 0.010251361, 0.07170683, -0.018816603, 0.027329156, -0.08482108, -0.079481706, 0.042785235, 0.024120849, 0.17413847, 0.035431888, 0.12439801, 0.10641275, -0.14828467, -0.024425287, 0.083415985, 0.1184966, 0.0026799317, 0.15399873, -0.010304051, 0.009742012, -0.10781785, -0.019472316, 0.061777476, -0.09798217, 0.028804509, -0.023371464, 0.015491205, 0.07521958, 0.024003757, 0.013465522, -0.089692086, -0.097794816, 0.021193562, -0.0592483, -0.056438103, -0.10987866, 0.11802823, -0.06440032, 0.07704621, -0.040138967, -0.13891736, -0.16027485, 0.08631986, -0.16786237, -0.085757814, 0.015491205, -0.013243048, 0.09133137, -0.16196096, -0.2313728, -0.083134964, 0.12308659, 0.07559427, 0.09723278, 0.048663225, -0.13339064, 0.016345974, -0.13189186, 0.11025336, 0.03346475, -0.00688498, -0.059435643, 0.0056877197, 0.014999421, 0.063650936, -0.15053283, -0.017481761, 0.05194179, 0.061402783, 0.0077924393, 0.19971126, -0.07713988, 0.06425981, 0.0021369199, 0.12158781, -0.024331613, -0.008571098, 0.03264511, -0.0020242194, -0.05507984, -0.00805004, -0.03304322, 0.050630365, -0.1475353, -0.003735219, -0.0202217, 0.16271034, -0.059435643, 0.06023187, 0.06660164}));
docs.add(new Document("item7", "2", "Dinosaurs were huge.", new double[]{0.09842033, -0.025395654, -0.03831241, 0.037825905, 0.17008278, -0.022269849, -0.05935383, 0.032668933, -0.022148222, 0.08178179, -0.062710725, 0.164342, -0.024021273, -0.052640036, 0.027366007, 0.18098053, -0.18487258, -0.008903074, 0.16346629, 0.009407825, 0.110339746, -0.046315446, 0.08046822, 0.12512955, -0.06635953, 0.0070239417, 0.055753678, -0.0005336371, -0.012326866, 0.1578228, -0.023668556, -0.035904203, 0.05988899, -0.08032227, -0.11520481, 0.08499274, 0.05093726, -0.09248494, -0.011128843, -0.07064079, 0.01466818, 0.082073696, -0.040574666, -0.07292737, 0.03539337, 0.05025615, -0.1145237, 0.09584184, 0.04186391, -0.034833886, -0.18234275, -0.090441614, -0.09194978, -0.031331036, -0.13106494, 0.068208255, 0.03220675, -0.031233737, -0.04337208, 0.1289243, 0.1363192, 0.052494083, 0.03074723, -0.000113359885, 0.07681943, 0.03962598, 0.016529068, -0.04191256, -0.03612313, 0.023084749, 0.10917213, 0.09477153, -0.09652295, -0.0999285, 0.11685894, -0.012649177, 0.043688312, -0.10333405, -0.060521446, -0.042034186, 0.0483831, 0.0028141378, -0.17270991, 0.05424551, 0.25570798, 0.09511208, -0.08504139, -0.1570444, -0.084262975, -0.13291366, -0.023741532, -0.14857918, 0.09190113, -0.08041958, -0.019837314, 0.09569589, -0.053029243, -0.030722905, -0.05239678, -0.15091442, -0.05872137, -0.056045584, 0.11831845, -0.1145237, -0.08761988, -0.0035727844, -0.05570503, 0.19285129, 0.011761302, 0.087717175, 0.107712604, -0.11277228, -0.042423394, 0.13048112, -0.03356897, 0.056775343, 0.08640361, -0.11831845, -0.10868562, 0.0410855, 0.12036178, -0.09477153, -0.017611546, 0.0075043673, -0.12668636, -0.006391483, 0.0012185475, -0.05161837}));
docs.add(new Document("item8", "1", "The hero saves the day again.", new double[]{0.11208976, 0.058739875, -0.017022463, 0.15080968, -0.0031057745, -0.048069898, -0.059069872, 0.09861479, 0.11626975, 0.12935972, 0.008882481, 0.08766981, -0.00940498, -0.077164836, 0.0015795279, 0.06297486, -0.06313986, 0.0931698, -0.17247963, -0.12077974, -0.044797402, -0.1404697, -0.050077394, 0.020432455, 0.07897983, 0.022632452, -0.046914898, 0.0031212433, -0.059399873, 0.03321993, -0.07237984, 0.10119978, 0.19612958, -0.086349815, 0.038252417, 0.084094815, 0.16257966, 0.15520966, 0.063304864, -0.08761481, 0.06132487, -0.08486482, -0.09789979, 0.06935485, -0.04160741, 0.05585238, -0.13485971, 0.105544776, -0.08084983, 0.103344776, 0.053817384, -0.0923998, -0.052607387, 0.015537467, 0.025299946, -0.05961987, 0.08090483, -0.25453946, 0.01894746, -0.026344944, -0.14552969, 0.014341219, 0.14409968, 0.12264974, -0.0456224, -0.13529971, -0.0466674, 0.006166862, 0.02776119, -0.020184956, 0.093939796, 0.054779883, -0.09635979, -0.016334964, -0.029177437, -0.06863985, 0.19139959, -0.08794481, -0.08464482, -0.013413096, 0.022109952, -0.122099735, -0.0458149, -0.019676207, 0.10785477, -0.109119765, -0.09690979, -0.28423938, 0.050709892, 0.12803972, 0.10620477, 0.12110974, -0.006658423, -0.052304886, -0.07798983, -0.035667423, -0.07507484, 0.02745869, 0.07237984, -0.0230862, -0.03684992, -0.067539856, -0.052387387, -0.05202989, 0.14244969, 0.080684826, -0.038472418, 0.112639755, 0.03242243, -0.07501984, 0.10631477, -0.024076197, -0.07754983, 0.06610986, -0.12671973, -0.044082403, 0.006001862, 0.037454918, 0.054504883, -0.03679492, 0.076669835, 0.02271495, 0.14794968, 0.06440486, -0.006850923, -0.06984985, 0.035639923, -0.009143731}));
docs.add(new Document("item9", "2", "Don't worry about it.", new double[]{-0.051598575, 0.22204931, -0.017881807, 0.11678282, 0.18426134, -0.03713568, -0.016847137, 0.06549915, 0.057626653, 0.032569632, 0.00076827104, -0.04489571, -0.07530603, 0.10778569, 0.030477798, 0.050338972, -0.21053298, -0.04341118, -0.097708896, -0.13432723, 0.1438642, 0.059606023, -0.12299085, -0.036820777, -0.026699001, 0.18381149, -0.02861089, 0.08259371, -0.14962237, -0.07373153, 0.02321261, 0.085607745, -0.13810603, 0.065139264, -0.12685962, 0.14098512, 0.17112552, -0.035921063, -0.008536032, -0.09680918, 0.04435588, -0.16086878, -0.035718627, 0.09689915, -0.0007956842, 0.034256592, -0.00234769, 0.04577293, 0.06725359, 0.015958669, -0.06486935, 0.124160476, 0.09887852, -0.050518915, -0.07080746, -0.078859895, 0.17013584, -0.22078972, -0.10103783, 0.06873812, 0.0370682, 0.04563797, -0.060235824, -0.056816913, -0.064689405, 0.11273411, 0.16572724, 0.108415484, 0.07921978, 0.05569227, -0.11210431, -0.05848138, -0.008361713, 0.07458626, -0.08992637, -0.07557594, -0.0020102975, -0.07080746, -0.0092614265, 0.06626391, -0.05848138, -0.078545, 0.08362838, -0.031737395, -0.047549862, -0.15367107, 0.093930095, -0.087182246, 0.06401462, 0.09006133, 0.10886534, 0.013338254, 0.025551865, -0.027553728, 0.14206477, -0.09060115, -0.07818511, -0.001209693, 0.017893054, -0.069367915, 0.0709874, 0.050249003, -0.13396735, -0.056681953, -0.022166694, -0.02170559, 0.08277365, -0.101667635, 0.09096104, 0.049529232, -0.095819496, -0.08974643, 0.054477658, -0.037967913, -0.08682236, 0.077690266, 0.03828281, -0.04136433, -0.14431405, -0.060505737, 0.025619343, -0.019400073, 0.11075474, 0.066893704, 0.07494614, 0.03684327, 0.03929499, -0.017353225}));
return docs;
}
public String getQueryVector() {
return "0.13481648, 0.022579897, -0.038054377, 0.035029914, 0.15404047, -0.012947189, 0.013434003, 0.0328755,0.0859279, 0.090071, 0.07391291, 0.10896354, 0.04085097, 0.019876525, 0.013806882, 0.03799223,-0.097528584, -0.10324606, -0.07863604, -0.01846787, -0.0018139011, 0.09686569, 0.0065512774,0.014107257, 0.0004389097, 0.07432722, 0.012698603, 0.09404838, 0.11592395, -0.08290344, -0.023802113,0.0771031, 0.15578057, 0.17152436, 0.06732538, 0.04408259, 0.04163816, 0.03196402, 0.08451925,0.05365315, -0.10473758, 0.054730356, -0.0686926, 0.12412729, 0.1910798, -0.048971448, -0.007649199,-0.059122045, -0.005241022, 0.021440545, -0.1014231, 0.08282058, -0.057671957, -0.024755025, -0.07619162,0.0966171, 0.047272775, -0.20384054, -0.024319999, -0.014739079, 0.03681145, -0.03331053, 0.09172824,-0.09744572, 0.08584504, 0.13440217, -0.019234344, 0.19605151, 0.030389642, -0.0646738, 0.08447782,-0.107969195, -0.19058262, -0.05369458, 0.071675636, -0.095871344, 0.030141056, 0.025107188, 0.06309942,-0.09951727, 0.031446133, -0.07888463, -0.08182623, -0.063762315, -0.0955399, -0.03948375, -0.031073254,-0.29979473, -0.06711823, 0.07743455, 0.0074161496, 0.01248109, -0.0812462, -0.0676154, 3.0668652e-05,-0.17931339, 0.2008575, 0.026950868, 0.11111795, 0.07507298, -0.00898017, 0.038800135, -0.015267325,-0.13788238, -0.010740988, -0.00870569, -0.037743647, -0.12445874, 0.014676933, -0.27344462, -0.05738194,0.07101274, -0.023615673, 0.0040369336, -0.039028008, 0.06546099, -0.072421394, -0.17119291,-0.019358637, -0.05489608, 0.006199114, 0.0515816, 0.050048653, -0.034843475, 0.07847032, -0.13315925,0.025335059, -0.0432954";
}
static class Document {
@JsonProperty("id")
String id;
@JsonProperty("pk")
String pk;
@JsonProperty("text")
String text;
@JsonProperty("embedding")
double[] embedding;
@JsonProperty("score")
double score;
public Document(String id, String pk, String text, double[] embedding) {
this.id = id;
this.pk = pk;
this.text = text;
this.embedding = embedding;
}
public Document() {
}
public String getId() {
return id;
}
public double getScore() {
return score;
}
}
} |
Nice — this is a terminal case, so it makes sense to log it at error level. | public void onComplete() {
logger.info("Upstream connection publisher was completed. Terminating processor.");
isDisposed.set(true);
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onComplete());
}
} | final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; | public void onComplete() {
logger.info("Upstream connection publisher was completed. Terminating processor.");
isDisposed.set(true);
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onComplete());
}
} | class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable {
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM
= AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream");
private static final String TRY_COUNT_KEY = "tryCount";
private final ClientLogger logger;
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean isRequested = new AtomicBoolean();
private final AtomicBoolean isRetryPending = new AtomicBoolean();
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object lock = new Object();
private final AmqpRetryPolicy retryPolicy;
private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction;
private final AmqpErrorContext errorContext;
private volatile Subscription upstream;
private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>();
private volatile Throwable lastError;
private volatile T currentChannel;
private volatile Disposable connectionSubscription;
private volatile Disposable retrySubscription;
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param loggingContext Additional context to add to the logging scope.
*/
public AmqpChannelProcessor(String fullyQualifiedNamespace,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy,
Map<String, Object> loggingContext) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
this.logger
= new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null."));
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
@Override
public void onSubscribe(Subscription subscription) {
if (Operators.setOnce(UPSTREAM, this, subscription)) {
isRequested.set(true);
subscription.request(1);
} else {
logger.warning("Processors can only be subscribed to once.");
}
}
@Override
public void onNext(T amqpChannel) {
logger.info("Setting next AMQP channel.");
Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null.");
final T oldChannel;
final Disposable oldSubscription;
synchronized (lock) {
oldChannel = currentChannel;
oldSubscription = connectionSubscription;
currentChannel = amqpChannel;
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel));
connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe(state -> {
if (state == AmqpEndpointState.ACTIVE) {
retryAttempts.set(0);
logger.info("Channel is now active.");
}
}, error -> {
setAndClearChannel();
onError(error);
}, () -> {
if (isDisposed()) {
logger.info("Channel is disposed.");
} else {
logger.info("Channel is closed. Requesting upstream.");
setAndClearChannel();
requestUpstream();
}
});
}
close(oldChannel);
if (oldSubscription != null) {
oldSubscription.dispose();
}
isRequested.set(false);
}
/**
* When downstream or upstream encounters an error, calculates whether to request another item upstream.
*
* @param throwable Exception to analyse.
*/
@Override
public void onError(Throwable throwable) {
Objects.requireNonNull(throwable, "'throwable' is required.");
if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) {
logger.warning("Retry is already pending. Ignoring transient error.", throwable);
return;
}
final int attemptsMade = retryAttempts.getAndIncrement();
final int attempts;
final Duration retryInterval;
if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient())
|| (throwable instanceof IllegalStateException)
|| (throwable instanceof RejectedExecutionException)) {
attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries());
final Throwable throwableToUse = throwable instanceof AmqpException
? throwable
: new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext);
retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts);
} else {
attempts = attemptsMade;
retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts);
}
if (retryInterval != null) {
if (isRetryPending.getAndSet(true)) {
retryAttempts.decrementAndGet();
return;
}
logger.atInfo()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.addKeyValue(INTERVAL_KEY, retryInterval.toMillis())
.log("Transient error occurred. Retrying.", throwable);
retrySubscription = Mono.delay(retryInterval).subscribe(i -> {
if (isDisposed()) {
logger.atInfo()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.log("Not requesting from upstream. Processor is disposed.");
} else {
logger.atInfo().addKeyValue(TRY_COUNT_KEY, attemptsMade).log("Requesting from upstream.");
requestUpstream();
isRetryPending.set(false);
}
});
} else {
logger.atError()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.log("Retry attempts exhausted or exception was not retriable.", throwable);
lastError = throwable;
isDisposed.set(true);
dispose();
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onError(throwable));
}
}
}
@Override
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
if (isDisposed()) {
if (lastError != null) {
actual.onSubscribe(Operators.emptySubscription());
actual.onError(lastError);
} else {
Operators.error(actual, logger.logExceptionAsWarning(
new IllegalStateException("Cannot subscribe. Processor is already terminated.")));
}
return;
}
final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this);
actual.onSubscribe(subscriber);
synchronized (lock) {
if (currentChannel != null) {
subscriber.complete(currentChannel);
return;
}
}
subscriber.onAdd();
subscribers.add(subscriber);
if (!isRetryPending.get()) {
requestUpstream();
}
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
if (retrySubscription != null && !retrySubscription.isDisposed()) {
retrySubscription.dispose();
}
onComplete();
synchronized (lock) {
setAndClearChannel();
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
private void requestUpstream() {
if (currentChannel != null) {
logger.verbose("Connection exists, not requesting another.");
return;
} else if (isDisposed()) {
logger.verbose("Is already disposed.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.warning("There is no upstream subscription.");
return;
}
if (!isRequested.getAndSet(true)) {
logger.info("Connection not requested, yet. Requesting one.");
subscription.request(1);
}
}
private void setAndClearChannel() {
T oldChannel;
synchronized (lock) {
oldChannel = currentChannel;
currentChannel = null;
}
close(oldChannel);
}
/**
* Checks the current state of the channel for this channel and returns true if the channel is null or if this
* processor is disposed.
*
* @return true if the current channel in the processor is null or if the processor is disposed
*/
public boolean isChannelClosed() {
synchronized (lock) {
return currentChannel == null || isDisposed();
}
}
private void close(T channel) {
if (channel instanceof AsyncCloseable) {
((AsyncCloseable) channel).closeAsync().subscribe();
} else if (channel instanceof AutoCloseable) {
try {
((AutoCloseable) channel).close();
} catch (Exception error) {
logger.warning("Error occurred closing AutoCloseable channel.", error);
}
} else if (channel instanceof Disposable) {
try {
((Disposable) channel).dispose();
} catch (Exception error) {
logger.warning("Error occurred closing Disposable channel.", error);
}
}
}
/**
* Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor.
* These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor.
* The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives
* a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified,
* which removes itself from the tracking list, then propagates the notification to the wrapped subscriber.
*/
private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
void onAdd() {
Object subscriberIdObj = actual.currentContext().getOrDefault(SUBSCRIBER_ID_KEY, null);
if (subscriberIdObj != null) {
subscriberId = subscriberIdObj.toString();
} else {
subscriberId = StringUtil.getRandomString("un");
}
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Added subscriber.");
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
}
} | class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable {
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM
= AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream");
private static final String TRY_COUNT_KEY = "tryCount";
private final ClientLogger logger;
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean isRequested = new AtomicBoolean();
private final AtomicBoolean isRetryPending = new AtomicBoolean();
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object lock = new Object();
private final AmqpRetryPolicy retryPolicy;
private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction;
private final AmqpErrorContext errorContext;
private volatile Subscription upstream;
private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>();
private volatile Throwable lastError;
private volatile T currentChannel;
private volatile Disposable connectionSubscription;
private volatile Disposable retrySubscription;
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param entityPath The entity path for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param logger The logger to use for this processor.
* @deprecated Use constructor overload that does not take {@link ClientLogger}
*/
@Deprecated
public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
Map<String, Object> loggingContext = new HashMap<>(1);
loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null."));
this.logger = new ClientLogger(getClass(), loggingContext);
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param loggingContext Additional context to add to the logging scope.
*/
public AmqpChannelProcessor(String fullyQualifiedNamespace,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy,
Map<String, Object> loggingContext) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
this.logger
= new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null."));
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
@Override
public void onSubscribe(Subscription subscription) {
if (Operators.setOnce(UPSTREAM, this, subscription)) {
isRequested.set(true);
subscription.request(1);
} else {
logger.warning("Processors can only be subscribed to once.");
}
}
@Override
public void onNext(T amqpChannel) {
logger.info("Setting next AMQP channel.");
Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null.");
final T oldChannel;
final Disposable oldSubscription;
synchronized (lock) {
oldChannel = currentChannel;
oldSubscription = connectionSubscription;
currentChannel = amqpChannel;
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel));
connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe(state -> {
if (state == AmqpEndpointState.ACTIVE) {
retryAttempts.set(0);
logger.info("Channel is now active.");
}
}, error -> {
setAndClearChannel();
onError(error);
}, () -> {
if (isDisposed()) {
logger.info("Channel is disposed.");
} else {
logger.info("Channel is closed. Requesting upstream.");
setAndClearChannel();
requestUpstream();
}
});
}
close(oldChannel);
if (oldSubscription != null) {
oldSubscription.dispose();
}
isRequested.set(false);
}
/**
* When downstream or upstream encounters an error, calculates whether to request another item upstream.
*
* @param throwable Exception to analyse.
*/
@Override
public void onError(Throwable throwable) {
Objects.requireNonNull(throwable, "'throwable' is required.");
if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) {
logger.warning("Retry is already pending. Ignoring transient error.", throwable);
return;
}
final int attemptsMade = retryAttempts.getAndIncrement();
final int attempts;
final Duration retryInterval;
if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient())
|| (throwable instanceof IllegalStateException)
|| (throwable instanceof RejectedExecutionException)) {
attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries());
final Throwable throwableToUse = throwable instanceof AmqpException
? throwable
: new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext);
retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts);
} else {
attempts = attemptsMade;
retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts);
}
if (retryInterval != null) {
if (isRetryPending.getAndSet(true)) {
retryAttempts.decrementAndGet();
return;
}
logger.atInfo()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.addKeyValue(INTERVAL_KEY, retryInterval.toMillis())
.log("Transient error occurred. Retrying.", throwable);
retrySubscription = Mono.delay(retryInterval).subscribe(i -> {
if (isDisposed()) {
logger.atInfo()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.log("Not requesting from upstream. Processor is disposed.");
} else {
logger.atInfo().addKeyValue(TRY_COUNT_KEY, attemptsMade).log("Requesting from upstream.");
requestUpstream();
isRetryPending.set(false);
}
});
} else {
logger.atError()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.log("Retry attempts exhausted or exception was not retriable.", throwable);
lastError = throwable;
isDisposed.set(true);
dispose();
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onError(throwable));
}
}
}
@Override
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
if (isDisposed()) {
if (lastError != null) {
actual.onSubscribe(Operators.emptySubscription());
actual.onError(lastError);
} else {
IllegalStateException error
= new IllegalStateException("Cannot subscribe. Processor is already terminated.");
Operators.error(actual, logger.logExceptionAsWarning(error));
}
return;
}
final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this);
actual.onSubscribe(subscriber);
synchronized (lock) {
if (currentChannel != null) {
subscriber.complete(currentChannel);
return;
}
}
subscriber.onAdd();
subscribers.add(subscriber);
if (!isRetryPending.get()) {
requestUpstream();
}
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
if (retrySubscription != null && !retrySubscription.isDisposed()) {
retrySubscription.dispose();
}
onComplete();
synchronized (lock) {
setAndClearChannel();
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
private void requestUpstream() {
if (currentChannel != null) {
logger.verbose("Connection exists, not requesting another.");
return;
} else if (isDisposed()) {
logger.verbose("Is already disposed.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.warning("There is no upstream subscription.");
return;
}
if (!isRequested.getAndSet(true)) {
logger.info("Connection not requested, yet. Requesting one.");
subscription.request(1);
}
}
private void setAndClearChannel() {
T oldChannel;
synchronized (lock) {
oldChannel = currentChannel;
currentChannel = null;
}
close(oldChannel);
}
/**
* Checks the current state of the channel for this channel and returns true if the channel is null or if this
* processor is disposed.
*
* @return true if the current channel in the processor is null or if the processor is disposed
*/
public boolean isChannelClosed() {
synchronized (lock) {
return currentChannel == null || isDisposed();
}
}
private void close(T channel) {
if (channel instanceof AsyncCloseable) {
((AsyncCloseable) channel).closeAsync().subscribe();
} else if (channel instanceof AutoCloseable) {
try {
((AutoCloseable) channel).close();
} catch (Exception error) {
logger.warning("Error occurred closing AutoCloseable channel.", error);
}
} else if (channel instanceof Disposable) {
try {
((Disposable) channel).dispose();
} catch (Exception error) {
logger.warning("Error occurred closing Disposable channel.", error);
}
}
}
/**
* Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor.
* These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor.
* The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives
* a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified,
* which removes itself from the tracking list, then propagates the notification to the wrapped subscriber.
*/
private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
void onAdd() {
Object subscriberIdObj = actual.currentContext().getOrDefault(SUBSCRIBER_ID_KEY, null);
if (subscriberIdObj != null) {
subscriberId = subscriberIdObj.toString();
} else {
subscriberId = StringUtil.getRandomString("un");
}
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Added subscriber.");
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
}
} |
I've used the customization to add back the preview ServiceVersion values. However, I think they should not be included in the SDK release when the target is a stable api-version. | public void customizeEventGridClientImplImports(LibraryCustomization customization, Logger logger) {
Arrays.asList("com.azure.messaging.eventgrid.namespaces", "com.azure.messaging.eventgrid.namespaces.models").forEach(p -> {
logger.info("Working on " + p);
PackageCustomization packageCustomization = customization.getPackage(p);
packageCustomization.listClasses().forEach(c -> {
c.customizeAst(comp -> {
if (comp.getImports().removeIf(i -> i.getNameAsString().equals("com.azure.messaging.eventgrid.namespaces.models.CloudEvent"))) {
logger.info("Removed CloudEvent import from " + c.getClassName());
comp.addImport("com.azure.core.models.CloudEvent");
}
});
});
});
customization.getRawEditor().removeFile("src/main/java/com/azure/messaging/eventgrid/namespaces/models/PublishResult.java");
customization.getRawEditor().removeFile("src/main/java/com/azure/messaging/eventgrid/namespaces/models/CloudEvent.java");
PackageCustomization packageCustomization = customization.getPackage("com.azure.messaging.eventgrid.namespaces");
ClassCustomization classCustomization = packageCustomization.getClass("EventGridServiceVersion");
classCustomization.customizeAst(compilationUnit -> {
EnumDeclaration clazz = compilationUnit.getEnumByName("EventGridServiceVersion").get();
clazz.getEntries().add(0, new EnumConstantDeclaration("/**\n" +
" * Enum value 2023-10-01-preview.\n" +
" */\n" +
"V2023_10_01_PREVIEW(\"2023-10-01-preview\")"));
clazz.getEntries().add(0, new EnumConstantDeclaration("/**\n" +
" * Enum value 2023-06-01-preview.\n" +
" */\n" +
"V2023_06_01_PREVIEW(\"2023-06-01-preview\")"));
});
} | }); | public void customizeEventGridClientImplImports(LibraryCustomization customization, Logger logger) {
Arrays.asList("com.azure.messaging.eventgrid.namespaces", "com.azure.messaging.eventgrid.namespaces.models").forEach(p -> {
logger.info("Working on " + p);
PackageCustomization packageCustomization = customization.getPackage(p);
packageCustomization.listClasses().forEach(c -> {
c.customizeAst(comp -> {
if (comp.getImports().removeIf(i -> i.getNameAsString().equals("com.azure.messaging.eventgrid.namespaces.models.CloudEvent"))) {
logger.info("Removed CloudEvent import from " + c.getClassName());
comp.addImport("com.azure.core.models.CloudEvent");
}
});
});
});
customization.getRawEditor().removeFile("src/main/java/com/azure/messaging/eventgrid/namespaces/models/PublishResult.java");
customization.getRawEditor().removeFile("src/main/java/com/azure/messaging/eventgrid/namespaces/models/CloudEvent.java");
PackageCustomization packageCustomization = customization.getPackage("com.azure.messaging.eventgrid.namespaces");
ClassCustomization classCustomization = packageCustomization.getClass("EventGridServiceVersion");
classCustomization.customizeAst(compilationUnit -> {
EnumDeclaration clazz = compilationUnit.getEnumByName("EventGridServiceVersion").get();
clazz.getEntries().add(0, new EnumConstantDeclaration("/**\n" +
" * Enum value 2023-10-01-preview.\n" +
" */\n" +
"V2023_10_01_PREVIEW(\"2023-10-01-preview\")"));
clazz.getEntries().add(0, new EnumConstantDeclaration("/**\n" +
" * Enum value 2023-06-01-preview.\n" +
" */\n" +
"V2023_06_01_PREVIEW(\"2023-06-01-preview\")"));
});
} | class EventGridCustomization extends Customization {
@Override
public void customize(LibraryCustomization customization, Logger logger) {
customizeEventGridClientImplImports(customization, logger);
}
} | class EventGridCustomization extends Customization {
@Override
public void customize(LibraryCustomization customization, Logger logger) {
customizeEventGridClientImplImports(customization, logger);
}
} |
I'd recommend against using `subscribe` within a reactive flow anywhere that isn't a sink-based call chain. This doesn't block and may result in race conditions or unexpected behaviors. | public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel) || httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
int contentLengthInt = contentLength.intValue();
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLengthInt);
response.getBody().subscribe(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), stream);
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, stream.toString(StandardCharsets.UTF_8)));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
});
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return Mono.just(response);
} | response.getBody().subscribe(byteBuffer -> { | public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel) || httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
Mono<HttpResponse> responseMono = Mono.just(response);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
final HttpResponse bufferedResponse = response.buffer();
responseMono = FluxUtil.collectBytesInByteBufferStream(bufferedResponse.getBody()).map(bytes -> {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, new String(bytes, StandardCharsets.UTF_8)));
return bufferedResponse;
});
}
}
return responseMono.doOnNext(ignored -> logBuilder.log(RESPONSE_LOG_MESSAGE));
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
private void addBasicResponseProperties(ClientLogger logger, HttpResponseLoggingContext loggingOptions,
HttpResponse response, LoggingEventBuilder logBuilder) {
logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, urlSanitizer.getRedactedUrl(response.getRequest().getUrl()))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
getAndLogContentLength(response.getHeaders(), logBuilder, logger);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, response.getBodyAsBinaryData().toString()));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
private void addBasicResponseProperties(ClientLogger logger, HttpResponseLoggingContext loggingOptions,
HttpResponse response, LoggingEventBuilder logBuilder) {
logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, urlSanitizer.getRedactedUrl(response.getRequest().getUrl()))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
getAndLogContentLength(response.getHeaders(), logBuilder, logger);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
response = response.buffer();
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, response.getBodyAsBinaryData().toString()));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
} |
We can simplify our usage to `BinaryData` only by using `BinaryData.writeTo(AsynchronousByteChannel)` and creating an `AccessibleByteArrayOutputStream`-based `AsynchronousByteChannel`. | public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel) || httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
int contentLengthInt = contentLength.intValue();
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLengthInt);
response.getBody().subscribe(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), stream);
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, stream.toString(StandardCharsets.UTF_8)));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
});
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return Mono.just(response);
} | ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), stream); | public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel) || httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
Mono<HttpResponse> responseMono = Mono.just(response);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
final HttpResponse bufferedResponse = response.buffer();
responseMono = FluxUtil.collectBytesInByteBufferStream(bufferedResponse.getBody()).map(bytes -> {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, new String(bytes, StandardCharsets.UTF_8)));
return bufferedResponse;
});
}
}
return responseMono.doOnNext(ignored -> logBuilder.log(RESPONSE_LOG_MESSAGE));
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
private void addBasicResponseProperties(ClientLogger logger, HttpResponseLoggingContext loggingOptions,
HttpResponse response, LoggingEventBuilder logBuilder) {
logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, urlSanitizer.getRedactedUrl(response.getRequest().getUrl()))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
getAndLogContentLength(response.getHeaders(), logBuilder, logger);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, response.getBodyAsBinaryData().toString()));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
private void addBasicResponseProperties(ClientLogger logger, HttpResponseLoggingContext loggingOptions,
HttpResponse response, LoggingEventBuilder logBuilder) {
logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, urlSanitizer.getRedactedUrl(response.getRequest().getUrl()))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
getAndLogContentLength(response.getHeaders(), logBuilder, logger);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
response = response.buffer();
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, response.getBodyAsBinaryData().toString()));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
} |
Need to buffer the HTTP response here as well. | public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, response.getBodyAsBinaryData().toString()));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
} | logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody, | public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
response = response.buffer();
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, response.getBodyAsBinaryData().toString()));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel) || httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
Mono<HttpResponse> responseMono = Mono.just(response);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
final HttpResponse bufferedResponse = response.buffer();
responseMono = FluxUtil.collectBytesInByteBufferStream(bufferedResponse.getBody()).map(bytes -> {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, new String(bytes, StandardCharsets.UTF_8)));
return bufferedResponse;
});
}
}
return responseMono.doOnNext(ignored -> logBuilder.log(RESPONSE_LOG_MESSAGE));
}
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
private void addBasicResponseProperties(ClientLogger logger, HttpResponseLoggingContext loggingOptions,
HttpResponse response, LoggingEventBuilder logBuilder) {
logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, urlSanitizer.getRedactedUrl(response.getRequest().getUrl()))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
getAndLogContentLength(response.getHeaders(), logBuilder, logger);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
@Override
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel) || httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
addBasicResponseProperties(logger, loggingOptions, response, logBuilder);
Long contentLength = getAndLogContentLength(response.getHeaders(), logBuilder, logger);
Mono<HttpResponse> responseMono = Mono.just(response);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
final HttpResponse bufferedResponse = response.buffer();
responseMono = FluxUtil.collectBytesInByteBufferStream(bufferedResponse.getBody()).map(bytes -> {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody,
contentTypeHeader, new String(bytes, StandardCharsets.UTF_8)));
return bufferedResponse;
});
}
}
return responseMono.doOnNext(ignored -> logBuilder.log(RESPONSE_LOG_MESSAGE));
}
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
private void addBasicResponseProperties(ClientLogger logger, HttpResponseLoggingContext loggingOptions,
HttpResponse response, LoggingEventBuilder logBuilder) {
logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, urlSanitizer.getRedactedUrl(response.getRequest().getUrl()))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
getAndLogContentLength(response.getHeaders(), logBuilder, logger);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder,
disableRedactedHeaderLogging);
}
}
@Override
} |
I recall Fabric team mentioning to go ahead with default 400 RUs, no? I know we have seen problems with throttling with 400 RUs, but curious are they blockers? | private boolean shouldCreateMetadataContainerIfNotExists() {
return this.config.getMetadataConfig().getStorageType() == CosmosMetadataStorageType.COSMOS
&& (this.config.getAccountConfig().getCosmosAuthConfig() instanceof CosmosMasterKeyAuthConfig);
} | private boolean shouldCreateMetadataContainerIfNotExists() {
return this.config.getMetadataConfig().getStorageType() == CosmosMetadataStorageType.COSMOS
&& (this.config.getAccountConfig().getCosmosAuthConfig() instanceof CosmosMasterKeyAuthConfig);
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private static final String CONNECTOR_NAME = "name";
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
private String connectorName;
/**
 * Kafka Connect lifecycle hook: parses the connector configuration, creates the cosmos
 * client and metadata reader, and starts the background metadata monitoring thread.
 *
 * @param props the raw connector configuration supplied by Kafka Connect.
 */
@Override
public void start(Map<String, String> props) {
    LOGGER.info("Starting the kafka cosmos source connector");
    this.config = new CosmosSourceConfig(props);
    // Fix: single lookup instead of containsKey+get, and no redundant toString() on a value
    // that is already a String (which would NPE if the key were present with a null value).
    String configuredName = props.get(CONNECTOR_NAME);
    this.connectorName = configuredName != null ? configuredName : "EMPTY";
    this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig(), connectorName);
    this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
    this.metadataReader = this.getMetadataReader();
    this.monitorThread = new MetadataMonitorThread(
        connectorName,
        this.config.getContainersConfig(),
        this.config.getMetadataConfig(),
        this.context(),
        this.metadataReader,
        this.cosmosClient
    );
    this.monitorThread.start();
}
// Identifies the Task implementation Kafka Connect should instantiate for this connector.
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
// Builds the per-task configuration maps handed out by Kafka Connect.
// Feed-range task units are spread across at most maxTasks configs; the metadata task unit
// is either written straight to cosmos (COSMOS storage) or piggybacked onto the last
// task config (KAFKA storage).
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
// NOTE(review): if there are no feed-range task units, taskConfigs is empty and this
// get(size-1) throws IndexOutOfBoundsException — confirm whether that state is reachable.
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
/**
 * Kafka Connect lifecycle hook: releases the resources created in {@link #start(Map)}.
 * The monitoring thread is closed before the cosmos client, since the thread was handed
 * the client at construction time; a failure closing one resource no longer leaks the other.
 */
@Override
public void stop() {
    LOGGER.info("Stopping Kafka CosmosDB source connector");
    try {
        if (this.monitorThread != null) {
            LOGGER.debug("Closing monitoring thread");
            this.monitorThread.close();
        }
    } finally {
        if (this.cosmosClient != null) {
            LOGGER.debug("Closing cosmos client");
            this.cosmosClient.close();
        }
    }
}
// Exposes the connector's configuration definition for validation and documentation.
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
// Reports the connector version shown by the Kafka Connect REST API.
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
// Resolves the metadata reader for the configured storage type.
// KAFKA: reuse the offset-topic-backed reader. COSMOS: verify (blocking) that the metadata
// container exists and is partitioned by /id, creating it when allowed, then wrap it.
private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
// The metadata documents are keyed by id, so the container must be partitioned by /id.
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.onErrorResume(throwable -> {
// Only auto-create on a 404 and when master-key auth permits container creation.
if (KafkaCosmosExceptionsHelper.isNotFoundException(throwable)
&& shouldCreateMetadataContainerIfNotExists()) {
return createMetadataContainer();
}
return Mono.error(new ConnectException(throwable));
})
.block(); // Blocking is acceptable here: this runs on the connector start path, not a task thread.
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
}
// Creates the metadata container, partitioned by /id, with autoscale max throughput of 4000 RU/s.
// NOTE(review): confirm the throughput choice — a 400 RU default was discussed, but throttling
// was reported at that level; this code currently provisions 4000 RU autoscale.
private Mono<CosmosContainerResponse> createMetadataContainer() {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.createContainer(
this.config.getMetadataConfig().getStorageName(),
"/id",
ThroughputProperties.createAutoscaledThroughput(4000));
}
/**
 * Persists the metadata records described by {@code metadataTaskUnit} into the cosmos
 * metadata container. Must only be invoked when the metadata storage type is COSMOS.
 */
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
    if (CosmosMetadataStorageType.COSMOS != metadataTaskUnit.getStorageType()) {
        throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
    }
    // metadataReader is guaranteed to be the cosmos-backed manager for this storage type.
    ((MetadataCosmosStorageManager) this.metadataReader).createMetadataItems(metadataTaskUnit);
}
/**
 * Distributes the feed-range task units across at most {@code maxTasks} task
 * configurations. With fewer units than tasks each unit gets its own config;
 * otherwise units are assigned round-robin across exactly {@code maxTasks} configs.
 */
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
    List<List<FeedRangeTaskUnit>> buckets = new ArrayList<>();
    if (taskUnits.size() <= maxTasks) {
        for (FeedRangeTaskUnit taskUnit : taskUnits) {
            buckets.add(Collections.singletonList(taskUnit));
        }
    } else {
        for (int i = 0; i < maxTasks; i++) {
            buckets.add(new ArrayList<>());
        }
        int next = 0;
        for (FeedRangeTaskUnit taskUnit : taskUnits) {
            buckets.get(next).add(taskUnit);
            next = (next + 1) % maxTasks;
        }
    }
    List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>(buckets.size());
    for (List<FeedRangeTaskUnit> bucket : buckets) {
        // Each task config starts from the connector-level settings and layers on its units.
        Map<String, String> taskConfig = this.config.originalsStrings();
        taskConfig.putAll(CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(bucket));
        taskConfig.put(CosmosSourceTaskConfig.SOURCE_TASK_ID,
            String.format("%s-%s-%d",
                "source",
                this.connectorName,
                RandomUtils.nextInt(1, 9999999)));
        feedRangeTaskConfigs.add(taskConfig);
    }
    return feedRangeTaskConfigs;
}
// Computes the full work assignment for this connector generation: one FeedRangeTaskUnit per
// effective feed range of every monitored container, plus a single MetadataTaskUnit capturing
// the container/feed-range snapshot used for the next reconciliation.
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
// Effective ranges merge the container's current feed ranges with any committed continuations.
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.connectorName,
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
// Maps each current feed range of the container to the change-feed state it should resume
// from. With no previously committed metadata offset every range starts fresh (null state);
// otherwise each range is matched against the previously known ranges.
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
// NOTE(review): .block().v assumes getFeedRangesMetadataOffset never completes empty — confirm.
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId(), this.connectorName)
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
// First run for this container: no continuation to resume from.
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
// Resolves the resume state for one current feed range by matching it against the feed
// ranges recorded in the metadata offset. Handles three cases: exact continuation offset
// exists (fast path); exactly one previously known range overlaps (merge/unchanged); several
// overlap (split — resume each sub-range from its own offset).
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
// Fast path: an offset committed for exactly this range.
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
// Otherwise find every previously known range that overlaps the current one.
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
FeedRangeContinuationTopicOffset continuationTopicOffset = this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedFeedRangesFromOffset.get(0)
);
// NOTE(review): the two branches below key the map differently — the null case uses
// the overlapped range, the non-null case uses containerFeedRange. Confirm this
// asymmetry is intentional (merge scenario) and not a copy/paste slip.
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedFeedRangesFromOffset.get(0), null);
} else {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(continuationTopicOffset, containerFeedRange));
}
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
// Split scenario: resume each previously known sub-range independently.
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
FeedRangeContinuationTopicOffset continuationTopicOffset =
this.kafkaOffsetStorageReader
.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedRangeFromOffset);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedRangeFromOffset, null);
} else {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
}
return Mono.just(effectiveContinuationMap);
}
// Zero overlaps should be impossible for a live container range.
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
/**
 * Builds the change-feed resume state for {@code feedRange} from a previously
 * committed continuation offset.
 */
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
    FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
    FeedRange feedRange) {
    return new KafkaCosmosChangeFeedState(
        feedRangeContinuationTopicOffset.getResponseContinuation(),
        feedRange,
        feedRangeContinuationTopicOffset.getItemLsn());
}
// Fetches (blocking) the current feed ranges of the container, converting any failure
// into a ConnectException so Kafka Connect surfaces it as a connector error.
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
// Resolves the container -> topic mapping: entries from the configured topic map win,
// and any container without an explicit mapping publishes to a topic named after itself.
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
// Parse the configured "topic<sep>container" pairs into container -> topic.
// NOTE(review): the split(...) call below is truncated in this copy of the file — the
// delimiter literal and closing parentheses are missing; restore it from upstream.
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
// Default: topic name equals the container name.
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
/**
 * Validates the supplied connector configuration. Connector-specific cross-field checks
 * run only when the standard per-field validation produced no errors.
 */
@Override
public Config validate(Map<String, String> connectorConfigs) {
    Config config = super.validate(connectorConfigs);
    boolean hasFieldLevelErrors =
        config.configValues().stream().anyMatch(value -> !value.errorMessages().isEmpty());
    if (hasFieldLevelErrors) {
        return config;
    }
    // Index the validated values by name for the connector-specific checks below.
    Map<String, ConfigValue> configValuesByName =
        config
            .configValues()
            .stream()
            .collect(Collectors.toMap(ConfigValue::name, Function.identity()));
    validateCosmosAccountAuthConfig(configValuesByName);
    validateThroughputControlConfig(configValuesByName);
    return config;
}
// AutoCloseable hook; delegates to the Kafka Connect stop() lifecycle method.
@Override
public void close() {
this.stop();
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private static final String CONNECTOR_NAME = "name";
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
private String connectorName;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.connectorName = props.containsKey(CONNECTOR_NAME) ? props.get(CONNECTOR_NAME).toString() : "EMPTY";
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig(), connectorName);
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
connectorName,
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
/**
 * Resolves the metadata reader matching the configured metadata storage type.
 * KAFKA: metadata offsets are read from the Connect offset topic.
 * COSMOS: metadata lives in a dedicated container, which is validated (must be
 * partitioned by /id) and, when permitted, created on demand.
 * Blocks intentionally; this is only called during connector start().
 */
private IMetadataReader getMetadataReader() {
    switch (this.config.getMetadataConfig().getStorageType()) {
        case KAFKA:
            return this.kafkaOffsetStorageReader;
        case COSMOS:
            CosmosAsyncContainer metadataContainer =
                this.cosmosClient
                    .getDatabase(this.config.getContainersConfig().getDatabaseName())
                    .getContainer(this.config.getMetadataConfig().getStorageName());
            metadataContainer.read()
                .doOnNext(containerResponse -> {
                    // The metadata container must be partitioned by exactly /id.
                    PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
                    if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
                        throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
                    }
                })
                .onErrorResume(throwable -> {
                    // Auto-create the container when it is missing and creation is allowed;
                    // every other failure is surfaced as a ConnectException.
                    if (KafkaCosmosExceptionsHelper.isNotFoundException(throwable)
                        && shouldCreateMetadataContainerIfNotExists()) {
                        return createMetadataContainer();
                    }
                    return Mono.error(new ConnectException(throwable));
                })
                .block();
            return new MetadataCosmosStorageManager(metadataContainer);
        default:
            throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
    }
}
/**
 * Creates the metadata container, partitioned by {@code /id}.
 * Invoked only when the container is missing and auto-creation is permitted.
 *
 * @return a {@link Mono} emitting the container-creation response.
 */
private Mono<CosmosContainerResponse> createMetadataContainer() {
    // Max autoscale throughput (RU/s) provisioned for the metadata container,
    // named so the value is documented in one place rather than as a bare literal.
    final int metadataContainerMaxAutoscaleThroughput = 4000;
    return this.cosmosClient
        .getDatabase(this.config.getContainersConfig().getDatabaseName())
        .createContainer(
            this.config.getMetadataConfig().getStorageName(),
            "/id",
            ThroughputProperties.createAutoscaledThroughput(metadataContainerMaxAutoscaleThroughput));
}
/**
 * Persists the metadata task unit's records into the Cosmos metadata container.
 * Must only be called when the metadata storage type is COSMOS.
 */
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
    // Guard: the cast below is only valid for Cosmos-backed metadata storage.
    if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
        throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
    }
    MetadataCosmosStorageManager storageManager = (MetadataCosmosStorageManager) this.metadataReader;
    storageManager.createMetadataItems(metadataTaskUnit);
}
/**
 * Distributes the feed-range task units round-robin over at most {@code maxTasks}
 * tasks and materializes one task config map per resulting partition.
 */
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
    // When there are fewer units than tasks, each unit gets its own task — which is
    // exactly what round-robin over min(maxTasks, size) buckets produces.
    int bucketCount = Math.min(maxTasks, taskUnits.size());
    List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
    for (int bucket = 0; bucket < bucketCount; bucket++) {
        partitionedTaskUnits.add(new ArrayList<>());
    }
    for (int index = 0; index < taskUnits.size(); index++) {
        partitionedTaskUnits.get(index % bucketCount).add(taskUnits.get(index));
    }
    List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
    for (List<FeedRangeTaskUnit> feedRangeTaskUnits : partitionedTaskUnits) {
        // Each task starts from the connector's raw config plus its assigned units.
        Map<String, String> taskConfig = this.config.originalsStrings();
        taskConfig.putAll(
            CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
        // Attach a best-effort unique task id so logs from different tasks are distinguishable.
        taskConfig.put(CosmosSourceTaskConfig.SOURCE_TASK_ID,
            String.format("%s-%s-%d",
                "source",
                this.connectorName,
                RandomUtils.nextInt(1, 9999999)));
        feedRangeTaskConfigs.add(taskConfig);
    }
    return feedRangeTaskConfigs;
}
/**
 * Builds the metadata task unit plus one feed-range task unit for every effective
 * feed range across all monitored containers.
 */
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
    List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
    Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
    List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
    Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
    for (CosmosContainerProperties containerProperties : allContainers) {
        // Effective ranges with the continuation state to resume from (null => start fresh).
        Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
            this.getEffectiveFeedRangesContinuationMap(
                this.config.getContainersConfig().getDatabaseName(),
                containerProperties);
        updatedContainerToFeedRangesMap.put(
            containerProperties.getResourceId(),
            effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
        );
        for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
            allFeedRangeTaskUnits.add(
                new FeedRangeTaskUnit(
                    this.config.getContainersConfig().getDatabaseName(),
                    containerProperties.getId(),
                    containerProperties.getResourceId(),
                    effectiveFeedRange,
                    effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
                    containerTopicMap.get(containerProperties.getId())
                )
            );
        }
    }
    // The metadata task unit captures the container list and per-container ranges
    // so the next generation can detect splits/merges and container changes.
    MetadataTaskUnit metadataTaskUnit =
        new MetadataTaskUnit(
            this.connectorName,
            this.config.getContainersConfig().getDatabaseName(),
            allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
            updatedContainerToFeedRangesMap,
            this.config.getMetadataConfig().getStorageName(),
            this.config.getMetadataConfig().getStorageType());
    return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
/**
 * For one container, maps every effective feed range to the change-feed state it
 * should resume from; a null value means no prior offset and processing starts fresh.
 */
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
    String databaseName,
    CosmosContainerProperties containerProperties) {
    List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
    // Previously persisted feed ranges for this container.
    // NOTE(review): the block() result is dereferenced without a null check — confirm
    // getFeedRangesMetadataOffset never completes empty.
    FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
        this.metadataReader
            .getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId(), this.connectorName)
            .block().v;
    Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
    CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
    Flux.fromIterable(containerFeedRanges)
        .flatMap(containerFeedRange -> {
            if (feedRangesMetadataTopicOffset == null) {
                // First run: no recorded metadata, start each current range with no continuation.
                return Mono.just(
                    Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
            } else {
                // Reconcile the current range against the previously tracked ranges.
                return this.getEffectiveContinuationMapForSingleFeedRange(
                    databaseName,
                    containerProperties.getResourceId(),
                    containerFeedRange,
                    container,
                    feedRangesMetadataTopicOffset.getFeedRanges());
            }
        })
        .doOnNext(map -> {
            effectiveFeedRangesContinuationMap.putAll(map);
        })
        .blockLast();
    return effectiveFeedRangesContinuationMap;
}
/**
 * Computes the effective continuation state for one current feed range.
 * Fast path: an offset exists for exactly this range. Otherwise the container's
 * ranges changed, and the previously tracked range(s) overlapping the current
 * range determine where to resume: exactly one overlap (merge-like case) resumes
 * under the current range's key; multiple overlaps (split-like case) resume each
 * previous sub-range from its own offset. A null map value means "start fresh".
 */
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
    String databaseName,
    String containerRid,
    FeedRange containerFeedRange,
    CosmosAsyncContainer cosmosAsyncContainer,
    List<FeedRange> rangesFromMetadataTopicOffset) {
    FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
        this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
    Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
    if (feedRangeContinuationTopicOffset != null) {
        // Exact-match offset exists; resume this range directly.
        effectiveContinuationMap.put(
            containerFeedRange,
            this.getContinuationStateFromOffset(
                feedRangeContinuationTopicOffset,
                containerFeedRange));
        return Mono.just(effectiveContinuationMap);
    }
    return Flux.fromIterable(rangesFromMetadataTopicOffset)
        .flatMap(rangeFromOffset -> {
            return ImplementationBridgeHelpers
                .CosmosAsyncContainerHelper
                .getCosmosAsyncContainerAccessor()
                .checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
                .flatMap(overlapped -> {
                    if (overlapped) {
                        return Mono.just(rangeFromOffset);
                    } else {
                        return Mono.empty();
                    }
                });
        })
        .collectList()
        .flatMap(overlappedFeedRangesFromOffset -> {
            if (overlappedFeedRangesFromOffset.size() == 1) {
                FeedRangeContinuationTopicOffset continuationTopicOffset = this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(
                    databaseName,
                    containerRid,
                    overlappedFeedRangesFromOffset.get(0)
                );
                if (continuationTopicOffset == null) {
                    effectiveContinuationMap.put(overlappedFeedRangesFromOffset.get(0), null);
                } else {
                    effectiveContinuationMap.put(
                        containerFeedRange,
                        this.getContinuationStateFromOffset(continuationTopicOffset, containerFeedRange));
                }
                return Mono.just(effectiveContinuationMap);
            }
            if (overlappedFeedRangesFromOffset.size() > 1) {
                for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
                    FeedRangeContinuationTopicOffset continuationTopicOffset =
                        this.kafkaOffsetStorageReader
                            .getFeedRangeContinuationOffset(
                                databaseName,
                                containerRid,
                                overlappedRangeFromOffset);
                    if (continuationTopicOffset == null) {
                        effectiveContinuationMap.put(overlappedRangeFromOffset, null);
                    } else {
                        // Reuse the offset fetched just above; the original issued a second,
                        // redundant getFeedRangeContinuationOffset call for the same key here.
                        effectiveContinuationMap.put(
                            overlappedRangeFromOffset,
                            this.getContinuationStateFromOffset(
                                continuationTopicOffset,
                                overlappedRangeFromOffset));
                    }
                }
                return Mono.just(effectiveContinuationMap);
            }
            // No prior range overlaps the current one: the metadata is inconsistent.
            LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
            return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
        });
}
/**
 * Rehydrates a change-feed state from a persisted continuation offset for the
 * given feed range.
 */
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
    FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
    FeedRange feedRange) {
    // Build and return the state directly; no intermediate variable is needed.
    return new KafkaCosmosChangeFeedState(
        feedRangeContinuationTopicOffset.getResponseContinuation(),
        feedRange,
        feedRangeContinuationTopicOffset.getItemLsn());
}
/**
 * Fetches the container's current feed ranges. Blocks; failures are converted to
 * connect exceptions via KafkaCosmosExceptionsHelper.
 */
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
    return this.cosmosClient
        .getDatabase(this.config.getContainersConfig().getDatabaseName())
        .getContainer(containerProperties.getId())
        .getFeedRanges()
        .onErrorMap(throwable ->
            KafkaCosmosExceptionsHelper.convertToConnectException(
                throwable,
                "GetFeedRanges failed for container " + containerProperties.getId()))
        .block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
/**
 * Validates the connector configuration. If the base per-field validation already
 * reports errors, they are returned as-is; otherwise cross-field auth and
 * throughput-control validations are applied on top.
 */
@Override
public Config validate(Map<String, String> connectorConfigs) {
    Config config = super.validate(connectorConfigs);
    // Short-circuit: don't run custom checks on structurally invalid config.
    if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
        return config;
    }
    // Index config values by name so the custom validators can annotate them.
    Map<String, ConfigValue> configValues =
        config
            .configValues()
            .stream()
            .collect(Collectors.toMap(ConfigValue::name, Function.identity()));
    validateCosmosAccountAuthConfig(configValues);
    validateThroughputControlConfig(configValues);
    return config;
}
@Override
public void close() {
    // AutoCloseable bridge: delegates to the Connect lifecycle stop().
    this.stop();
}
}
Better to put this RU in a constant variable and reference it here. | private Mono<CosmosContainerResponse> createMetadataContainer() {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.createContainer(
this.config.getMetadataConfig().getStorageName(),
"/id",
ThroughputProperties.createAutoscaledThroughput(4000));
} | ThroughputProperties.createAutoscaledThroughput(4000)); | private Mono<CosmosContainerResponse> createMetadataContainer() {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.createContainer(
this.config.getMetadataConfig().getStorageName(),
"/id",
ThroughputProperties.createAutoscaledThroughput(4000));
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private static final String CONNECTOR_NAME = "name";
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
private String connectorName;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.connectorName = props.containsKey(CONNECTOR_NAME) ? props.get(CONNECTOR_NAME).toString() : "EMPTY";
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig(), connectorName);
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
connectorName,
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.onErrorResume(throwable -> {
if (KafkaCosmosExceptionsHelper.isNotFoundException(throwable)
&& shouldCreateMetadataContainerIfNotExists()) {
return createMetadataContainer();
}
return Mono.error(new ConnectException(throwable));
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
}
/**
 * Whether the metadata container may be auto-created when missing: only for
 * Cosmos-backed metadata storage combined with master-key auth.
 * NOTE(review): presumably other auth types lack container-creation permission —
 * confirm against the auth config semantics.
 */
private boolean shouldCreateMetadataContainerIfNotExists() {
    return this.config.getMetadataConfig().getStorageType() == CosmosMetadataStorageType.COSMOS
        && (this.config.getAccountConfig().getCosmosAuthConfig() instanceof CosmosMasterKeyAuthConfig);
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
taskConfigs.put(CosmosSourceTaskConfig.SOURCE_TASK_ID,
String.format("%s-%s-%d",
"source",
this.connectorName,
RandomUtils.nextInt(1, 9999999)));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.connectorName,
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId(), this.connectorName)
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
FeedRangeContinuationTopicOffset continuationTopicOffset = this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedFeedRangesFromOffset.get(0)
);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedFeedRangesFromOffset.get(0), null);
} else {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(continuationTopicOffset, containerFeedRange));
}
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
FeedRangeContinuationTopicOffset continuationTopicOffset =
this.kafkaOffsetStorageReader
.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedRangeFromOffset);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedRangeFromOffset, null);
} else {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private static final String CONNECTOR_NAME = "name";
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
private String connectorName;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.connectorName = props.containsKey(CONNECTOR_NAME) ? props.get(CONNECTOR_NAME).toString() : "EMPTY";
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig(), connectorName);
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
connectorName,
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.onErrorResume(throwable -> {
if (KafkaCosmosExceptionsHelper.isNotFoundException(throwable)
&& shouldCreateMetadataContainerIfNotExists()) {
return createMetadataContainer();
}
return Mono.error(new ConnectException(throwable));
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
}
private boolean shouldCreateMetadataContainerIfNotExists() {
return this.config.getMetadataConfig().getStorageType() == CosmosMetadataStorageType.COSMOS
&& (this.config.getAccountConfig().getCosmosAuthConfig() instanceof CosmosMasterKeyAuthConfig);
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
taskConfigs.put(CosmosSourceTaskConfig.SOURCE_TASK_ID,
String.format("%s-%s-%d",
"source",
this.connectorName,
RandomUtils.nextInt(1, 9999999)));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.connectorName,
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId(), this.connectorName)
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
FeedRangeContinuationTopicOffset continuationTopicOffset = this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedFeedRangesFromOffset.get(0)
);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedFeedRangesFromOffset.get(0), null);
} else {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(continuationTopicOffset, containerFeedRange));
}
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
FeedRangeContinuationTopicOffset continuationTopicOffset =
this.kafkaOffsetStorageReader
.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedRangeFromOffset);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedRangeFromOffset, null);
} else {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} |
they proposed using 400RUs, but we decided to use autoscale 4000RUs | private boolean shouldCreateMetadataContainerIfNotExists() {
return this.config.getMetadataConfig().getStorageType() == CosmosMetadataStorageType.COSMOS
&& (this.config.getAccountConfig().getCosmosAuthConfig() instanceof CosmosMasterKeyAuthConfig);
} | private boolean shouldCreateMetadataContainerIfNotExists() {
return this.config.getMetadataConfig().getStorageType() == CosmosMetadataStorageType.COSMOS
&& (this.config.getAccountConfig().getCosmosAuthConfig() instanceof CosmosMasterKeyAuthConfig);
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private static final String CONNECTOR_NAME = "name";
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
private String connectorName;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.connectorName = props.containsKey(CONNECTOR_NAME) ? props.get(CONNECTOR_NAME).toString() : "EMPTY";
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig(), connectorName);
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
connectorName,
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.onErrorResume(throwable -> {
if (KafkaCosmosExceptionsHelper.isNotFoundException(throwable)
&& shouldCreateMetadataContainerIfNotExists()) {
return createMetadataContainer();
}
return Mono.error(new ConnectException(throwable));
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
}
private Mono<CosmosContainerResponse> createMetadataContainer() {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.createContainer(
this.config.getMetadataConfig().getStorageName(),
"/id",
ThroughputProperties.createAutoscaledThroughput(4000));
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
taskConfigs.put(CosmosSourceTaskConfig.SOURCE_TASK_ID,
String.format("%s-%s-%d",
"source",
this.connectorName,
RandomUtils.nextInt(1, 9999999)));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.connectorName,
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId(), this.connectorName)
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
FeedRangeContinuationTopicOffset continuationTopicOffset = this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedFeedRangesFromOffset.get(0)
);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedFeedRangesFromOffset.get(0), null);
} else {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(continuationTopicOffset, containerFeedRange));
}
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
FeedRangeContinuationTopicOffset continuationTopicOffset =
this.kafkaOffsetStorageReader
.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedRangeFromOffset);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedRangeFromOffset, null);
} else {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private static final String CONNECTOR_NAME = "name";
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
private String connectorName;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.connectorName = props.containsKey(CONNECTOR_NAME) ? props.get(CONNECTOR_NAME).toString() : "EMPTY";
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig(), connectorName);
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
connectorName,
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.onErrorResume(throwable -> {
if (KafkaCosmosExceptionsHelper.isNotFoundException(throwable)
&& shouldCreateMetadataContainerIfNotExists()) {
return createMetadataContainer();
}
return Mono.error(new ConnectException(throwable));
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
}
private Mono<CosmosContainerResponse> createMetadataContainer() {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.createContainer(
this.config.getMetadataConfig().getStorageName(),
"/id",
ThroughputProperties.createAutoscaledThroughput(4000));
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
taskConfigs.put(CosmosSourceTaskConfig.SOURCE_TASK_ID,
String.format("%s-%s-%d",
"source",
this.connectorName,
RandomUtils.nextInt(1, 9999999)));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.connectorName,
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId(), this.connectorName)
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
FeedRangeContinuationTopicOffset continuationTopicOffset = this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedFeedRangesFromOffset.get(0)
);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedFeedRangesFromOffset.get(0), null);
} else {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(continuationTopicOffset, containerFeedRange));
}
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
FeedRangeContinuationTopicOffset continuationTopicOffset =
this.kafkaOffsetStorageReader
.getFeedRangeContinuationOffset(
databaseName,
containerRid,
overlappedRangeFromOffset);
if (continuationTopicOffset == null) {
effectiveContinuationMap.put(overlappedRangeFromOffset, null);
} else {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} | |
Need to change toString method so the flag used is logged out. | static void initialize() {
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.setCosmosClientTelemetryConfigAccessor(
new ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor() {
@Override
public Duration getHttpNetworkRequestTimeout(CosmosClientTelemetryConfig config) {
return config.getHttpNetworkRequestTimeout();
}
@Override
public int getMaxConnectionPoolSize(CosmosClientTelemetryConfig config) {
return config.getMaxConnectionPoolSize();
}
@Override
public Duration getIdleHttpConnectionTimeout(CosmosClientTelemetryConfig config) {
return config.getIdleHttpConnectionTimeout();
}
@Override
public ProxyOptions getProxy(CosmosClientTelemetryConfig config) {
return config.getProxy();
}
@Override
public EnumSet<MetricCategory> getMetricCategories(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getMetricCategories();
}
@Override
public EnumSet<TagName> getMetricTagNames(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getDefaultTagNames();
}
@Override
public CosmosMeterOptions getMeterOptions(
CosmosClientTelemetryConfig config,
CosmosMetricName name) {
if (config != null &&
config.micrometerMetricsOptions != null) {
return config.micrometerMetricsOptions.getMeterOptions(name);
}
return createDisabledMeterOptions(name);
}
@Override
public CosmosMeterOptions createDisabledMeterOptions(CosmosMetricName name) {
return new CosmosMeterOptions(
name,
false,
new double[0],
false,
EnumSet.noneOf(TagName.class),
false);
}
@Override
public String getClientCorrelationId(CosmosClientTelemetryConfig config) {
return config.getClientCorrelationId();
}
@Override
public MeterRegistry getClientMetricRegistry(CosmosClientTelemetryConfig config) {
if (!config.isClientMetricsEnabled) {
return null;
}
return config.getClientMetricRegistry();
}
@Override
public Boolean isSendClientTelemetryToServiceEnabled(CosmosClientTelemetryConfig config) {
return config.isSendClientTelemetryToServiceEnabled();
}
@Override
public boolean isClientMetricsEnabled(CosmosClientTelemetryConfig config) {
return config.isClientMetricsEnabled;
}
@Override
public CosmosClientTelemetryConfig createSnapshot(
CosmosClientTelemetryConfig config,
boolean effectiveIsClientTelemetryEnabled) {
return config.setEffectiveIsClientTelemetryEnabled(effectiveIsClientTelemetryEnabled);
}
@Override
public Collection<CosmosDiagnosticsHandler> getDiagnosticHandlers(CosmosClientTelemetryConfig config) {
return config.getDiagnosticHandlers();
}
@Override
public void setAccountName(CosmosClientTelemetryConfig config, String accountName) {
config.setAccountName(accountName);
}
@Override
public String getAccountName(CosmosClientTelemetryConfig config) {
return config.getAccountName();
}
@Override
public void setClientCorrelationTag(CosmosClientTelemetryConfig config, Tag clientCorrelationTag) {
config.setClientCorrelationTag(clientCorrelationTag);
}
@Override
public Tag getClientCorrelationTag(CosmosClientTelemetryConfig config) {
return config.getClientCorrelationTag();
}
@Override
public void setClientTelemetry(CosmosClientTelemetryConfig config, ClientTelemetry clientTelemetry) {
config.clientTelemetry = clientTelemetry;
}
@Override
public ClientTelemetry getClientTelemetry(CosmosClientTelemetryConfig config) {
return config.clientTelemetry;
}
@Override
public void addDiagnosticsHandler(CosmosClientTelemetryConfig config,
CosmosDiagnosticsHandler handler) {
for (CosmosDiagnosticsHandler existingHandler : config.diagnosticHandlers) {
if (existingHandler.getClass().getCanonicalName().equals(handler.getClass().getCanonicalName())) {
return;
}
}
config.diagnosticHandlers.add(handler);
}
@Override
public void resetIsSendClientTelemetryToServiceEnabled(CosmosClientTelemetryConfig config) {
config.resetIsSendClientTelemetryToServiceEnabled();
}
@Override
public CosmosDiagnosticsThresholds getDiagnosticsThresholds(CosmosClientTelemetryConfig config) {
return config.diagnosticsThresholds;
}
@Override
public boolean isLegacyTracingEnabled(CosmosClientTelemetryConfig config) {
return config.useLegacyOpenTelemetryTracing;
}
@Override
public boolean isTransportLevelTracingEnabled(CosmosClientTelemetryConfig config) {
return config.isTransportLevelTracingEnabled;
}
@Override
public Tracer getOrCreateTracer(CosmosClientTelemetryConfig config) {
return config.getOrCreateTracer();
}
@Override
public void setUseLegacyTracing(CosmosClientTelemetryConfig config, boolean useLegacyTracing) {
config.setUseLegacyOpenTelemetryTracing(useLegacyTracing);
}
@Override
public void setTracer(CosmosClientTelemetryConfig config, Tracer tracer) {
if (tracer != null) {
config.tracer = tracer;
}
}
@Override
public double getSamplingRate(CosmosClientTelemetryConfig config) {
return config.samplingRate;
}
@Override
public boolean showQueryStatement(CosmosClientTelemetryConfig config) {
return config.showQueryStatement;
}
@Override
public double[] getDefaultPercentiles(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getDefaultPercentiles();
}
@Override
public boolean shouldPublishHistograms(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.shouldPublishHistograms();
}
@Override
public boolean shouldApplyDiagnosticThresholdsForTransportLevelMeters(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.shouldApplyDiagnosticThresholdsForTransportLevelMeters();
}
});
} | static void initialize() {
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.setCosmosClientTelemetryConfigAccessor(
new ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor() {
@Override
public Duration getHttpNetworkRequestTimeout(CosmosClientTelemetryConfig config) {
return config.getHttpNetworkRequestTimeout();
}
@Override
public int getMaxConnectionPoolSize(CosmosClientTelemetryConfig config) {
return config.getMaxConnectionPoolSize();
}
@Override
public Duration getIdleHttpConnectionTimeout(CosmosClientTelemetryConfig config) {
return config.getIdleHttpConnectionTimeout();
}
@Override
public ProxyOptions getProxy(CosmosClientTelemetryConfig config) {
return config.getProxy();
}
@Override
public EnumSet<MetricCategory> getMetricCategories(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getMetricCategories();
}
@Override
public EnumSet<TagName> getMetricTagNames(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getDefaultTagNames();
}
@Override
public CosmosMeterOptions getMeterOptions(
CosmosClientTelemetryConfig config,
CosmosMetricName name) {
if (config != null &&
config.micrometerMetricsOptions != null) {
return config.micrometerMetricsOptions.getMeterOptions(name);
}
return createDisabledMeterOptions(name);
}
@Override
public CosmosMeterOptions createDisabledMeterOptions(CosmosMetricName name) {
return new CosmosMeterOptions(
name,
false,
new double[0],
false,
EnumSet.noneOf(TagName.class),
false);
}
@Override
public String getClientCorrelationId(CosmosClientTelemetryConfig config) {
return config.getClientCorrelationId();
}
@Override
public MeterRegistry getClientMetricRegistry(CosmosClientTelemetryConfig config) {
if (!config.isClientMetricsEnabled) {
return null;
}
return config.getClientMetricRegistry();
}
@Override
public Boolean isSendClientTelemetryToServiceEnabled(CosmosClientTelemetryConfig config) {
return config.isSendClientTelemetryToServiceEnabled();
}
@Override
public boolean isClientMetricsEnabled(CosmosClientTelemetryConfig config) {
return config.isClientMetricsEnabled;
}
@Override
public CosmosClientTelemetryConfig createSnapshot(
CosmosClientTelemetryConfig config,
boolean effectiveIsClientTelemetryEnabled) {
return config.setEffectiveIsClientTelemetryEnabled(effectiveIsClientTelemetryEnabled);
}
@Override
public Collection<CosmosDiagnosticsHandler> getDiagnosticHandlers(CosmosClientTelemetryConfig config) {
return config.getDiagnosticHandlers();
}
@Override
public void setAccountName(CosmosClientTelemetryConfig config, String accountName) {
config.setAccountName(accountName);
}
@Override
public String getAccountName(CosmosClientTelemetryConfig config) {
return config.getAccountName();
}
@Override
public void setClientCorrelationTag(CosmosClientTelemetryConfig config, Tag clientCorrelationTag) {
config.setClientCorrelationTag(clientCorrelationTag);
}
@Override
public Tag getClientCorrelationTag(CosmosClientTelemetryConfig config) {
return config.getClientCorrelationTag();
}
@Override
public void setClientTelemetry(CosmosClientTelemetryConfig config, ClientTelemetry clientTelemetry) {
config.clientTelemetry = clientTelemetry;
}
@Override
public ClientTelemetry getClientTelemetry(CosmosClientTelemetryConfig config) {
return config.clientTelemetry;
}
@Override
public void addDiagnosticsHandler(CosmosClientTelemetryConfig config,
CosmosDiagnosticsHandler handler) {
for (CosmosDiagnosticsHandler existingHandler : config.diagnosticHandlers) {
if (existingHandler.getClass().getCanonicalName().equals(handler.getClass().getCanonicalName())) {
return;
}
}
config.diagnosticHandlers.add(handler);
}
@Override
public void resetIsSendClientTelemetryToServiceEnabled(CosmosClientTelemetryConfig config) {
config.resetIsSendClientTelemetryToServiceEnabled();
}
@Override
public CosmosDiagnosticsThresholds getDiagnosticsThresholds(CosmosClientTelemetryConfig config) {
return config.diagnosticsThresholds;
}
@Override
public boolean isLegacyTracingEnabled(CosmosClientTelemetryConfig config) {
return config.useLegacyOpenTelemetryTracing;
}
@Override
public boolean isTransportLevelTracingEnabled(CosmosClientTelemetryConfig config) {
return config.isTransportLevelTracingEnabled;
}
@Override
public Tracer getOrCreateTracer(CosmosClientTelemetryConfig config) {
return config.getOrCreateTracer();
}
@Override
public void setUseLegacyTracing(CosmosClientTelemetryConfig config, boolean useLegacyTracing) {
config.setUseLegacyOpenTelemetryTracing(useLegacyTracing);
}
@Override
public void setTracer(CosmosClientTelemetryConfig config, Tracer tracer) {
if (tracer != null) {
config.tracer = tracer;
}
}
@Override
public double getSamplingRate(CosmosClientTelemetryConfig config) {
return config.samplingRate;
}
@Override
public ShowQueryOptions showQueryOptions(CosmosClientTelemetryConfig config) {
return config.showQueryOptions;
}
@Override
public double[] getDefaultPercentiles(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getDefaultPercentiles();
}
@Override
public boolean shouldPublishHistograms(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.shouldPublishHistograms();
}
@Override
public boolean shouldApplyDiagnosticThresholdsForTransportLevelMeters(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.shouldApplyDiagnosticThresholdsForTransportLevelMeters();
}
});
} | class outside of this package. | class outside of this package. | |
added. | static void initialize() {
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.setCosmosClientTelemetryConfigAccessor(
new ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor() {
@Override
public Duration getHttpNetworkRequestTimeout(CosmosClientTelemetryConfig config) {
return config.getHttpNetworkRequestTimeout();
}
@Override
public int getMaxConnectionPoolSize(CosmosClientTelemetryConfig config) {
return config.getMaxConnectionPoolSize();
}
@Override
public Duration getIdleHttpConnectionTimeout(CosmosClientTelemetryConfig config) {
return config.getIdleHttpConnectionTimeout();
}
@Override
public ProxyOptions getProxy(CosmosClientTelemetryConfig config) {
return config.getProxy();
}
@Override
public EnumSet<MetricCategory> getMetricCategories(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getMetricCategories();
}
@Override
public EnumSet<TagName> getMetricTagNames(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getDefaultTagNames();
}
@Override
public CosmosMeterOptions getMeterOptions(
CosmosClientTelemetryConfig config,
CosmosMetricName name) {
if (config != null &&
config.micrometerMetricsOptions != null) {
return config.micrometerMetricsOptions.getMeterOptions(name);
}
return createDisabledMeterOptions(name);
}
@Override
public CosmosMeterOptions createDisabledMeterOptions(CosmosMetricName name) {
return new CosmosMeterOptions(
name,
false,
new double[0],
false,
EnumSet.noneOf(TagName.class),
false);
}
@Override
public String getClientCorrelationId(CosmosClientTelemetryConfig config) {
return config.getClientCorrelationId();
}
@Override
public MeterRegistry getClientMetricRegistry(CosmosClientTelemetryConfig config) {
if (!config.isClientMetricsEnabled) {
return null;
}
return config.getClientMetricRegistry();
}
@Override
public Boolean isSendClientTelemetryToServiceEnabled(CosmosClientTelemetryConfig config) {
return config.isSendClientTelemetryToServiceEnabled();
}
@Override
public boolean isClientMetricsEnabled(CosmosClientTelemetryConfig config) {
return config.isClientMetricsEnabled;
}
@Override
public CosmosClientTelemetryConfig createSnapshot(
CosmosClientTelemetryConfig config,
boolean effectiveIsClientTelemetryEnabled) {
return config.setEffectiveIsClientTelemetryEnabled(effectiveIsClientTelemetryEnabled);
}
@Override
public Collection<CosmosDiagnosticsHandler> getDiagnosticHandlers(CosmosClientTelemetryConfig config) {
return config.getDiagnosticHandlers();
}
@Override
public void setAccountName(CosmosClientTelemetryConfig config, String accountName) {
config.setAccountName(accountName);
}
@Override
public String getAccountName(CosmosClientTelemetryConfig config) {
return config.getAccountName();
}
@Override
public void setClientCorrelationTag(CosmosClientTelemetryConfig config, Tag clientCorrelationTag) {
config.setClientCorrelationTag(clientCorrelationTag);
}
@Override
public Tag getClientCorrelationTag(CosmosClientTelemetryConfig config) {
return config.getClientCorrelationTag();
}
@Override
public void setClientTelemetry(CosmosClientTelemetryConfig config, ClientTelemetry clientTelemetry) {
config.clientTelemetry = clientTelemetry;
}
@Override
public ClientTelemetry getClientTelemetry(CosmosClientTelemetryConfig config) {
return config.clientTelemetry;
}
@Override
public void addDiagnosticsHandler(CosmosClientTelemetryConfig config,
CosmosDiagnosticsHandler handler) {
for (CosmosDiagnosticsHandler existingHandler : config.diagnosticHandlers) {
if (existingHandler.getClass().getCanonicalName().equals(handler.getClass().getCanonicalName())) {
return;
}
}
config.diagnosticHandlers.add(handler);
}
@Override
public void resetIsSendClientTelemetryToServiceEnabled(CosmosClientTelemetryConfig config) {
config.resetIsSendClientTelemetryToServiceEnabled();
}
@Override
public CosmosDiagnosticsThresholds getDiagnosticsThresholds(CosmosClientTelemetryConfig config) {
return config.diagnosticsThresholds;
}
@Override
public boolean isLegacyTracingEnabled(CosmosClientTelemetryConfig config) {
return config.useLegacyOpenTelemetryTracing;
}
@Override
public boolean isTransportLevelTracingEnabled(CosmosClientTelemetryConfig config) {
return config.isTransportLevelTracingEnabled;
}
@Override
public Tracer getOrCreateTracer(CosmosClientTelemetryConfig config) {
return config.getOrCreateTracer();
}
@Override
public void setUseLegacyTracing(CosmosClientTelemetryConfig config, boolean useLegacyTracing) {
config.setUseLegacyOpenTelemetryTracing(useLegacyTracing);
}
@Override
public void setTracer(CosmosClientTelemetryConfig config, Tracer tracer) {
if (tracer != null) {
config.tracer = tracer;
}
}
@Override
public double getSamplingRate(CosmosClientTelemetryConfig config) {
return config.samplingRate;
}
@Override
public boolean showQueryStatement(CosmosClientTelemetryConfig config) {
return config.showQueryStatement;
}
@Override
public double[] getDefaultPercentiles(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getDefaultPercentiles();
}
@Override
public boolean shouldPublishHistograms(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.shouldPublishHistograms();
}
@Override
public boolean shouldApplyDiagnosticThresholdsForTransportLevelMeters(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.shouldApplyDiagnosticThresholdsForTransportLevelMeters();
}
});
} | static void initialize() {
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.setCosmosClientTelemetryConfigAccessor(
new ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor() {
@Override
public Duration getHttpNetworkRequestTimeout(CosmosClientTelemetryConfig config) {
return config.getHttpNetworkRequestTimeout();
}
@Override
public int getMaxConnectionPoolSize(CosmosClientTelemetryConfig config) {
return config.getMaxConnectionPoolSize();
}
@Override
public Duration getIdleHttpConnectionTimeout(CosmosClientTelemetryConfig config) {
return config.getIdleHttpConnectionTimeout();
}
@Override
public ProxyOptions getProxy(CosmosClientTelemetryConfig config) {
return config.getProxy();
}
@Override
public EnumSet<MetricCategory> getMetricCategories(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getMetricCategories();
}
@Override
public EnumSet<TagName> getMetricTagNames(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getDefaultTagNames();
}
@Override
public CosmosMeterOptions getMeterOptions(
CosmosClientTelemetryConfig config,
CosmosMetricName name) {
if (config != null &&
config.micrometerMetricsOptions != null) {
return config.micrometerMetricsOptions.getMeterOptions(name);
}
return createDisabledMeterOptions(name);
}
@Override
public CosmosMeterOptions createDisabledMeterOptions(CosmosMetricName name) {
return new CosmosMeterOptions(
name,
false,
new double[0],
false,
EnumSet.noneOf(TagName.class),
false);
}
@Override
public String getClientCorrelationId(CosmosClientTelemetryConfig config) {
return config.getClientCorrelationId();
}
@Override
public MeterRegistry getClientMetricRegistry(CosmosClientTelemetryConfig config) {
if (!config.isClientMetricsEnabled) {
return null;
}
return config.getClientMetricRegistry();
}
@Override
public Boolean isSendClientTelemetryToServiceEnabled(CosmosClientTelemetryConfig config) {
return config.isSendClientTelemetryToServiceEnabled();
}
@Override
public boolean isClientMetricsEnabled(CosmosClientTelemetryConfig config) {
return config.isClientMetricsEnabled;
}
@Override
public CosmosClientTelemetryConfig createSnapshot(
CosmosClientTelemetryConfig config,
boolean effectiveIsClientTelemetryEnabled) {
return config.setEffectiveIsClientTelemetryEnabled(effectiveIsClientTelemetryEnabled);
}
@Override
public Collection<CosmosDiagnosticsHandler> getDiagnosticHandlers(CosmosClientTelemetryConfig config) {
return config.getDiagnosticHandlers();
}
@Override
public void setAccountName(CosmosClientTelemetryConfig config, String accountName) {
config.setAccountName(accountName);
}
@Override
public String getAccountName(CosmosClientTelemetryConfig config) {
return config.getAccountName();
}
@Override
public void setClientCorrelationTag(CosmosClientTelemetryConfig config, Tag clientCorrelationTag) {
config.setClientCorrelationTag(clientCorrelationTag);
}
@Override
public Tag getClientCorrelationTag(CosmosClientTelemetryConfig config) {
return config.getClientCorrelationTag();
}
@Override
public void setClientTelemetry(CosmosClientTelemetryConfig config, ClientTelemetry clientTelemetry) {
config.clientTelemetry = clientTelemetry;
}
@Override
public ClientTelemetry getClientTelemetry(CosmosClientTelemetryConfig config) {
return config.clientTelemetry;
}
@Override
public void addDiagnosticsHandler(CosmosClientTelemetryConfig config,
CosmosDiagnosticsHandler handler) {
for (CosmosDiagnosticsHandler existingHandler : config.diagnosticHandlers) {
if (existingHandler.getClass().getCanonicalName().equals(handler.getClass().getCanonicalName())) {
return;
}
}
config.diagnosticHandlers.add(handler);
}
@Override
public void resetIsSendClientTelemetryToServiceEnabled(CosmosClientTelemetryConfig config) {
config.resetIsSendClientTelemetryToServiceEnabled();
}
@Override
public CosmosDiagnosticsThresholds getDiagnosticsThresholds(CosmosClientTelemetryConfig config) {
return config.diagnosticsThresholds;
}
@Override
public boolean isLegacyTracingEnabled(CosmosClientTelemetryConfig config) {
return config.useLegacyOpenTelemetryTracing;
}
@Override
public boolean isTransportLevelTracingEnabled(CosmosClientTelemetryConfig config) {
return config.isTransportLevelTracingEnabled;
}
@Override
public Tracer getOrCreateTracer(CosmosClientTelemetryConfig config) {
return config.getOrCreateTracer();
}
@Override
public void setUseLegacyTracing(CosmosClientTelemetryConfig config, boolean useLegacyTracing) {
config.setUseLegacyOpenTelemetryTracing(useLegacyTracing);
}
@Override
public void setTracer(CosmosClientTelemetryConfig config, Tracer tracer) {
if (tracer != null) {
config.tracer = tracer;
}
}
@Override
public double getSamplingRate(CosmosClientTelemetryConfig config) {
return config.samplingRate;
}
@Override
public ShowQueryOptions showQueryOptions(CosmosClientTelemetryConfig config) {
return config.showQueryOptions;
}
@Override
public double[] getDefaultPercentiles(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.getDefaultPercentiles();
}
@Override
public boolean shouldPublishHistograms(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.shouldPublishHistograms();
}
@Override
public boolean shouldApplyDiagnosticThresholdsForTransportLevelMeters(CosmosClientTelemetryConfig config) {
return config.micrometerMetricsOptions.shouldApplyDiagnosticThresholdsForTransportLevelMeters();
}
});
} | class outside of this package. | class outside of this package. | |
We should add reading the env var here before calling the client ctor. | public HttpClient build() {
OkHttpClient.Builder httpClientBuilder
= this.okHttpClient == null ? new OkHttpClient.Builder() : this.okHttpClient.newBuilder();
for (Interceptor interceptor : this.networkInterceptors) {
httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor);
}
httpClientBuilder = httpClientBuilder.connectTimeout(getTimeout(connectionTimeout, DEFAULT_CONNECT_TIMEOUT))
.writeTimeout(getTimeout(writeTimeout, DEFAULT_WRITE_TIMEOUT))
.readTimeout(getTimeout(readTimeout, DEFAULT_READ_TIMEOUT));
if (callTimeout != null) {
httpClientBuilder.callTimeout(callTimeout);
}
if (this.connectionPool != null) {
httpClientBuilder = httpClientBuilder.connectionPool(connectionPool);
}
if (this.dispatcher != null) {
httpClientBuilder = httpClientBuilder.dispatcher(dispatcher);
}
Configuration buildConfiguration
= (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
ProxyOptions buildProxyOptions
= (proxyOptions == null) ? ProxyOptions.fromConfiguration(buildConfiguration, true) : proxyOptions;
if (buildProxyOptions != null) {
httpClientBuilder
= httpClientBuilder.proxySelector(new OkHttpProxySelector(buildProxyOptions.getType().toProxyType(),
buildProxyOptions::getAddress, buildProxyOptions.getNonProxyHosts()));
if (buildProxyOptions.getUsername() != null) {
ProxyAuthenticator proxyAuthenticator
= new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword());
httpClientBuilder = httpClientBuilder.proxyAuthenticator(proxyAuthenticator)
.addInterceptor(proxyAuthenticator.getProxyAuthenticationInfoInterceptor());
}
}
httpClientBuilder.followRedirects(this.followRedirects);
return new OkHttpAsyncHttpClient(httpClientBuilder.build(), responseTimeout);
} | return new OkHttpAsyncHttpClient(httpClientBuilder.build(), responseTimeout); | public HttpClient build() {
OkHttpClient.Builder httpClientBuilder
= this.okHttpClient == null ? new OkHttpClient.Builder() : this.okHttpClient.newBuilder();
for (Interceptor interceptor : this.networkInterceptors) {
httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor);
}
httpClientBuilder = httpClientBuilder.connectTimeout(getTimeout(connectionTimeout, getDefaultConnectTimeout()))
.writeTimeout(getTimeout(writeTimeout, getDefaultWriteTimeout()))
.readTimeout(getTimeout(readTimeout, getDefaultReadTimeout()));
if (callTimeout != null) {
httpClientBuilder.callTimeout(callTimeout);
}
if (this.connectionPool != null) {
httpClientBuilder = httpClientBuilder.connectionPool(connectionPool);
}
if (this.dispatcher != null) {
httpClientBuilder = httpClientBuilder.dispatcher(dispatcher);
}
Configuration buildConfiguration
= (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
ProxyOptions buildProxyOptions
= (proxyOptions == null) ? ProxyOptions.fromConfiguration(buildConfiguration, true) : proxyOptions;
if (buildProxyOptions != null) {
httpClientBuilder
= httpClientBuilder.proxySelector(new OkHttpProxySelector(buildProxyOptions.getType().toProxyType(),
buildProxyOptions::getAddress, buildProxyOptions.getNonProxyHosts()));
if (buildProxyOptions.getUsername() != null) {
ProxyAuthenticator proxyAuthenticator
= new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword());
httpClientBuilder = httpClientBuilder.proxyAuthenticator(proxyAuthenticator)
.addInterceptor(proxyAuthenticator.getProxyAuthenticationInfoInterceptor());
}
}
httpClientBuilder.followRedirects(this.followRedirects);
return new OkHttpAsyncHttpClient(httpClientBuilder.build(),
getTimeout(responseTimeout, getDefaultResponseTimeout()));
} | class OkHttpAsyncHttpClientBuilder {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpAsyncHttpClientBuilder.class);
private final okhttp3.OkHttpClient okHttpClient;
private static final Duration MINIMUM_TIMEOUT = Duration.ofMillis(1);
private static final Duration DEFAULT_CONNECT_TIMEOUT;
private static final Duration DEFAULT_WRITE_TIMEOUT;
private static final Duration DEFAULT_RESPONSE_TIMEOUT;
private static final Duration DEFAULT_READ_TIMEOUT;
static {
ClientLogger logger = new ClientLogger(OkHttpAsyncHttpClientBuilder.class);
Configuration configuration = Configuration.getGlobalConfiguration();
DEFAULT_CONNECT_TIMEOUT = getDefaultTimeoutFromEnvironment(configuration,
PROPERTY_AZURE_REQUEST_CONNECT_TIMEOUT, Duration.ofSeconds(10), logger);
DEFAULT_WRITE_TIMEOUT = getDefaultTimeoutFromEnvironment(configuration, PROPERTY_AZURE_REQUEST_WRITE_TIMEOUT,
Duration.ofSeconds(60), logger);
DEFAULT_RESPONSE_TIMEOUT = getDefaultTimeoutFromEnvironment(configuration,
PROPERTY_AZURE_REQUEST_RESPONSE_TIMEOUT, Duration.ofSeconds(60), logger);
DEFAULT_READ_TIMEOUT = getDefaultTimeoutFromEnvironment(configuration, PROPERTY_AZURE_REQUEST_READ_TIMEOUT,
Duration.ofSeconds(60), logger);
}
private List<Interceptor> networkInterceptors = new ArrayList<>();
private Duration readTimeout;
private Duration responseTimeout;
private Duration writeTimeout;
private Duration connectionTimeout;
private Duration callTimeout;
private ConnectionPool connectionPool;
private Dispatcher dispatcher;
private ProxyOptions proxyOptions;
private Configuration configuration;
private boolean followRedirects;
/**
* Creates OkHttpAsyncHttpClientBuilder.
*/
public OkHttpAsyncHttpClientBuilder() {
this.okHttpClient = null;
}
/**
* Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient.
*
* @param okHttpClient the httpclient
*/
public OkHttpAsyncHttpClientBuilder(OkHttpClient okHttpClient) {
this.okHttpClient = Objects.requireNonNull(okHttpClient, "'okHttpClient' cannot be null.");
}
/**
* Add a network layer interceptor to Http request pipeline.
*
* @param networkInterceptor the interceptor to add
* @return the updated OkHttpAsyncHttpClientBuilder object
*/
public OkHttpAsyncHttpClientBuilder addNetworkInterceptor(Interceptor networkInterceptor) {
Objects.requireNonNull(networkInterceptor, "'networkInterceptor' cannot be null.");
this.networkInterceptors.add(networkInterceptor);
return this;
}
/**
* Add network layer interceptors to Http request pipeline.
* <p>
* This replaces all previously-set interceptors.
*
* @param networkInterceptors The interceptors to add.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) {
this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "'networkInterceptors' cannot be null.");
return this;
}
/**
* Sets the read timeout duration used when reading the server response.
* <p>
* The read timeout begins once the first response read is triggered after the server response is received. This
* timeout triggers periodically but won't fire its operation if another read operation has completed between when
* the timeout is triggered and completes.
* <p>
* If {@code readTimeout} is null or {@link Configuration
* timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout period will be
* applied to response read. When applying the timeout the greatest of one millisecond and the value of {@code
* readTimeout} will be used.
*
* @param readTimeout Read timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) {
this.readTimeout = readTimeout;
return this;
}
/**
* Sets the response timeout duration used when waiting for a server to reply.
* <p>
* The response timeout begins once the request write completes and finishes once the first response read is
* triggered when the server response is received.
* <p>
* If {@code responseTimeout} is null either {@link Configuration
* 60-second timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout will be
* applied to the response. When applying the timeout the greatest of one millisecond and the value of {@code
* responseTimeout} will be used.
* <p>
* Given OkHttp doesn't have an equivalent timeout for just responses, this is handled manually.
*
* @param responseTimeout Response timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder responseTimeout(Duration responseTimeout) {
this.responseTimeout = responseTimeout;
return this;
}
/**
* Sets the writing timeout for a request to be sent.
* <p>
* The writing timeout does not apply to the entire request but to the request being sent over the wire. For example
* a request body which emits {@code 10} {@code 8KB} buffers will trigger {@code 10} write operations, the last
* write tracker will update when each operation completes and the outbound buffer will be periodically checked to
* determine if it is still draining.
* <p>
* If {@code writeTimeout} is null either {@link Configuration
* timeout will be used, if it is a {@link Duration} less than or equal to zero then no write timeout will be
* applied. When applying the timeout the greatest of one millisecond and the value of {@code writeTimeout} will be
* used.
*
* @param writeTimeout Write operation timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder writeTimeout(Duration writeTimeout) {
this.writeTimeout = writeTimeout;
return this;
}
/**
* Sets the connection timeout for a request to be sent.
* <p>
* The connection timeout begins once the request attempts to connect to the remote host and finishes once the
* connection is resolved.
* <p>
* If {@code connectTimeout} is null either {@link Configuration
* 10-second timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout will be
* applied. When applying the timeout the greatest of one millisecond and the value of {@code connectTimeout} will
* be used.
* <p>
* By default, the connection timeout is 10 seconds.
*
* @param connectionTimeout Connect timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) {
this.connectionTimeout = connectionTimeout;
return this;
}
/**
* Sets the default timeout for complete calls.
* <p>
* The call timeout spans the entire call: resolving DNS, connecting, writing the request body,
* server processing, and reading the response body.
* <p>
* Null or {@link Duration
* must be between 1 and {@link Integer
* <p>
* By default, call timeout is not enabled.
*
* @param callTimeout Call timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder callTimeout(Duration callTimeout) {
if (callTimeout != null && callTimeout.isNegative()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'callTimeout' cannot be negative"));
}
this.callTimeout = callTimeout;
return this;
}
/**
* Sets the Http connection pool.
*
* @param connectionPool The OkHttp connection pool to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) {
this.connectionPool = Objects.requireNonNull(connectionPool, "'connectionPool' cannot be null.");
return this;
}
/**
* Sets the dispatcher that also composes the thread pool for executing HTTP requests.
*
* @param dispatcher The dispatcher to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) {
this.dispatcher = Objects.requireNonNull(dispatcher, "'dispatcher' cannot be null.");
return this;
}
/**
* Sets the proxy.
*
* @param proxyOptions The proxy configuration to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the HTTP client.
* <p>
* The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* <p>Sets the followRedirect flag on the underlying OkHttp-backed {@link com.azure.core.http.HttpClient}.</p>
*
* <p>If this is set to 'true' redirects will be followed automatically, and
* if your HTTP pipeline is configured with a redirect policy it will not be called.</p>
*
* @param followRedirects The followRedirects value to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder followRedirects(boolean followRedirects) {
this.followRedirects = followRedirects;
return this;
}
/**
* Creates a new OkHttp-backed {@link com.azure.core.http.HttpClient} instance on every call, using the
* configuration set in the builder at the time of the build method call.
*
* @return A new OkHttp-backed {@link com.azure.core.http.HttpClient} instance.
*/
/*
* Returns the timeout in milliseconds to use based on the passed Duration and default timeout.
*
* If the timeout is {@code null} the default timeout will be used. If the timeout is less than or equal to zero
* no timeout will be used. If the timeout is less than one millisecond a timeout of one millisecond will be used.
*/
static Duration getTimeout(Duration configuredTimeout, Duration defaultTimeout) {
if (configuredTimeout == null) {
return defaultTimeout;
}
if (configuredTimeout.isZero() || configuredTimeout.isNegative()) {
return Duration.ZERO;
}
if (configuredTimeout.compareTo(MINIMUM_TIMEOUT) < 0) {
return MINIMUM_TIMEOUT;
} else {
return configuredTimeout;
}
}
} | class OkHttpAsyncHttpClientBuilder {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpAsyncHttpClientBuilder.class);
private final okhttp3.OkHttpClient okHttpClient;
private List<Interceptor> networkInterceptors = new ArrayList<>();
private Duration readTimeout;
private Duration responseTimeout;
private Duration writeTimeout;
private Duration connectionTimeout;
private Duration callTimeout;
private ConnectionPool connectionPool;
private Dispatcher dispatcher;
private ProxyOptions proxyOptions;
private Configuration configuration;
private boolean followRedirects;
/**
* Creates OkHttpAsyncHttpClientBuilder.
*/
public OkHttpAsyncHttpClientBuilder() {
this.okHttpClient = null;
}
/**
* Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient.
*
* @param okHttpClient the httpclient
*/
public OkHttpAsyncHttpClientBuilder(OkHttpClient okHttpClient) {
this.okHttpClient = Objects.requireNonNull(okHttpClient, "'okHttpClient' cannot be null.");
}
/**
* Add a network layer interceptor to Http request pipeline.
*
* @param networkInterceptor the interceptor to add
* @return the updated OkHttpAsyncHttpClientBuilder object
*/
public OkHttpAsyncHttpClientBuilder addNetworkInterceptor(Interceptor networkInterceptor) {
Objects.requireNonNull(networkInterceptor, "'networkInterceptor' cannot be null.");
this.networkInterceptors.add(networkInterceptor);
return this;
}
/**
* Add network layer interceptors to Http request pipeline.
* <p>
* This replaces all previously-set interceptors.
*
* @param networkInterceptors The interceptors to add.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) {
this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "'networkInterceptors' cannot be null.");
return this;
}
/**
* Sets the read timeout duration used when reading the server response.
* <p>
* The read timeout begins once the first response read is triggered after the server response is received. This
* timeout triggers periodically but won't fire its operation if another read operation has completed between when
* the timeout is triggered and completes.
* <p>
* If {@code readTimeout} is null or {@link Configuration
* timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout period will be
* applied to response read. When applying the timeout the greatest of one millisecond and the value of {@code
* readTimeout} will be used.
*
* @param readTimeout Read timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) {
this.readTimeout = readTimeout;
return this;
}
/**
* Sets the response timeout duration used when waiting for a server to reply.
* <p>
* The response timeout begins once the request write completes and finishes once the first response read is
* triggered when the server response is received.
* <p>
* If {@code responseTimeout} is null either {@link Configuration
* 60-second timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout will be
* applied to the response. When applying the timeout the greatest of one millisecond and the value of {@code
* responseTimeout} will be used.
* <p>
* Given OkHttp doesn't have an equivalent timeout for just responses, this is handled manually.
*
* @param responseTimeout Response timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder responseTimeout(Duration responseTimeout) {
this.responseTimeout = responseTimeout;
return this;
}
/**
* Sets the writing timeout for a request to be sent.
* <p>
* The writing timeout does not apply to the entire request but to the request being sent over the wire. For example
* a request body which emits {@code 10} {@code 8KB} buffers will trigger {@code 10} write operations, the last
* write tracker will update when each operation completes and the outbound buffer will be periodically checked to
* determine if it is still draining.
* <p>
* If {@code writeTimeout} is null either {@link Configuration
* timeout will be used, if it is a {@link Duration} less than or equal to zero then no write timeout will be
* applied. When applying the timeout the greatest of one millisecond and the value of {@code writeTimeout} will be
* used.
*
* @param writeTimeout Write operation timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder writeTimeout(Duration writeTimeout) {
this.writeTimeout = writeTimeout;
return this;
}
/**
* Sets the connection timeout for a request to be sent.
* <p>
* The connection timeout begins once the request attempts to connect to the remote host and finishes once the
* connection is resolved.
* <p>
* If {@code connectTimeout} is null either {@link Configuration
* 10-second timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout will be
* applied. When applying the timeout the greatest of one millisecond and the value of {@code connectTimeout} will
* be used.
* <p>
* By default, the connection timeout is 10 seconds.
*
* @param connectionTimeout Connect timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) {
this.connectionTimeout = connectionTimeout;
return this;
}
/**
* Sets the default timeout for complete calls.
* <p>
* The call timeout spans the entire call: resolving DNS, connecting, writing the request body,
* server processing, and reading the response body.
* <p>
* Null or {@link Duration
* must be between 1 and {@link Integer
* <p>
* By default, call timeout is not enabled.
*
* @param callTimeout Call timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder callTimeout(Duration callTimeout) {
if (callTimeout != null && callTimeout.isNegative()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'callTimeout' cannot be negative"));
}
this.callTimeout = callTimeout;
return this;
}
/**
* Sets the Http connection pool.
*
* @param connectionPool The OkHttp connection pool to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) {
this.connectionPool = Objects.requireNonNull(connectionPool, "'connectionPool' cannot be null.");
return this;
}
/**
* Sets the dispatcher that also composes the thread pool for executing HTTP requests.
*
* @param dispatcher The dispatcher to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) {
this.dispatcher = Objects.requireNonNull(dispatcher, "'dispatcher' cannot be null.");
return this;
}
/**
* Sets the proxy.
*
* @param proxyOptions The proxy configuration to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the HTTP client.
* <p>
* The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* <p>Sets the followRedirect flag on the underlying OkHttp-backed {@link com.azure.core.http.HttpClient}.</p>
*
* <p>If this is set to 'true' redirects will be followed automatically, and
* if your HTTP pipeline is configured with a redirect policy it will not be called.</p>
*
* @param followRedirects The followRedirects value to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder followRedirects(boolean followRedirects) {
this.followRedirects = followRedirects;
return this;
}
/**
* Creates a new OkHttp-backed {@link com.azure.core.http.HttpClient} instance on every call, using the
* configuration set in the builder at the time of the build method call.
*
* @return A new OkHttp-backed {@link com.azure.core.http.HttpClient} instance.
*/
} |
Isn't idle timeout a little different from response timeout? From their docs, this will close the connection if the connection doesn't send or receive any data: https://vertx.io/docs/apidocs/io/vertx/core/http/HttpClientOptions.html#setIdleTimeout-int- | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(HttpUtils.AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(HttpUtils.AZURE_IGNORE_RESPONSE_BODY).orElse(false);
Long responseTimeout = context.getData(HttpUtils.AZURE_RESPONSE_TIMEOUT)
.filter(timeoutDuration -> timeoutDuration instanceof Duration)
.map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
.orElse(null);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
RequestOptions options = new RequestOptions().setMethod(HttpMethod.valueOf(request.getHttpMethod().name()))
.setAbsoluteURI(request.getUrl());
if (responseTimeout != null) {
options.setIdleTimeout(responseTimeout);
}
return Mono.create(sink -> client.request(options, requestResult -> {
if (requestResult.failed()) {
sink.error(wrapVertxException(requestResult.cause()));
return;
}
HttpClientRequest vertxRequest = requestResult.result();
for (HttpHeader header : request.getHeaders()) {
vertxRequest.putHeader(header.getName(), header.getValuesList());
}
if (request.getHeaders().get(HttpHeaderName.CONTENT_LENGTH) == null) {
vertxRequest.setChunked(true);
}
vertxRequest.response(event -> {
if (event.succeeded()) {
HttpClientResponse vertxHttpResponse = event.result();
vertxHttpResponse.exceptionHandler(exception -> sink.error(wrapVertxException(exception)));
if (eagerlyReadResponse || ignoreResponseBody) {
vertxHttpResponse.body(bodyEvent -> {
if (bodyEvent.succeeded()) {
sink.success(
new BufferedVertxHttpResponse(request, vertxHttpResponse, bodyEvent.result()));
} else {
sink.error(wrapVertxException(bodyEvent.cause()));
}
});
} else {
sink.success(new VertxHttpAsyncResponse(request, vertxHttpResponse));
}
} else {
sink.error(wrapVertxException(event.cause()));
}
});
sendBody(sink, request, progressReporter, vertxRequest);
}));
} | options.setIdleTimeout(responseTimeout); | public Mono<HttpResponse> send(HttpRequest request, Context context) {
return Mono.deferContextual(contextView -> Mono.fromFuture(sendInternal(request, context, contextView)))
.onErrorMap(VertxUtils::wrapVertxException);
} | class VertxAsyncHttpClient implements HttpClient {
private final Vertx vertx;
final io.vertx.core.http.HttpClient client;
/**
* Constructs a {@link VertxAsyncHttpClient}.
*
* @param client The Vert.x {@link io.vertx.core.http.HttpClient}
*/
VertxAsyncHttpClient(io.vertx.core.http.HttpClient client, Vertx vertx) {
this.client = Objects.requireNonNull(client, "client cannot be null");
this.vertx = Objects.requireNonNull(vertx, "vertx cannot be null");
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
return send(request, context).block();
}
@SuppressWarnings("deprecation")
private void sendBody(MonoSink<HttpResponse> sink, HttpRequest azureRequest, ProgressReporter progressReporter,
HttpClientRequest vertxRequest) {
BinaryData body = azureRequest.getBodyAsBinaryData();
if (body == null) {
vertxRequest.send(result -> {
if (result.failed()) {
sink.error(wrapVertxException(result.cause()));
}
});
return;
}
BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
if (bodyContent instanceof ByteArrayContent
|| bodyContent instanceof StringContent
|| bodyContent instanceof SerializableContent) {
byte[] content = bodyContent.toBytes();
vertxRequest.send(Buffer.buffer(Unpooled.wrappedBuffer(content)), result -> {
if (result.succeeded()) {
reportProgress(content.length, progressReporter);
} else {
sink.error(wrapVertxException(result.cause()));
}
});
} else if (bodyContent instanceof ByteBufferContent) {
long contentLength = bodyContent.getLength();
vertxRequest.send(Buffer.buffer(Unpooled.wrappedBuffer(bodyContent.toByteBuffer())), result -> {
if (result.succeeded()) {
reportProgress(contentLength, progressReporter);
} else {
sink.error(wrapVertxException(result.cause()));
}
});
} else if (bodyContent instanceof FileContent) {
FileContent fileContent = (FileContent) bodyContent;
vertx.fileSystem().open(fileContent.getFile().toString(), new OpenOptions().setRead(true), event -> {
if (event.succeeded()) {
AsyncFile file = event.result();
file.setReadPos(fileContent.getPosition());
if (fileContent.getLength() != null) {
file.setReadLength(fileContent.getLength());
}
vertxRequest.send(file, result -> {
if (result.succeeded()) {
reportProgress(fileContent.getLength(), progressReporter);
} else {
sink.error(wrapVertxException(result.cause()));
}
});
} else {
sink.error(wrapVertxException(event.cause()));
}
});
} else {
azureRequest.getBody().subscribe(new VertxRequestWriteSubscriber(vertxRequest, sink, progressReporter));
}
}
private static void reportProgress(long progress, ProgressReporter progressReporter) {
if (progressReporter != null) {
progressReporter.reportProgress(progress);
}
}
} | class VertxAsyncHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(VertxAsyncHttpClient.class);
final io.vertx.core.http.HttpClient client;
private final Duration responseTimeout;
/**
* Constructs a {@link VertxAsyncHttpClient}.
*
* @param client The Vert.x {@link io.vertx.core.http.HttpClient}
*/
VertxAsyncHttpClient(io.vertx.core.http.HttpClient client, Duration responseTimeout) {
this.client = Objects.requireNonNull(client, "client cannot be null");
this.responseTimeout = responseTimeout;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
try {
return sendInternal(request, context, reactor.util.context.Context.empty()).get();
} catch (Exception e) {
Throwable mapped = e;
if (e instanceof ExecutionException) {
mapped = e.getCause();
}
mapped = VertxUtils.wrapVertxException(mapped);
if (mapped instanceof Error) {
throw LOGGER.logThrowableAsError((Error) mapped);
} else if (mapped instanceof IOException) {
throw LOGGER.logExceptionAsError(new UncheckedIOException((IOException) mapped));
} else if (mapped instanceof RuntimeException) {
throw LOGGER.logExceptionAsError((RuntimeException) mapped);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(mapped));
}
}
}
private CompletableFuture<HttpResponse> sendInternal(HttpRequest request, Context context,
ContextView contextView) {
boolean eagerlyReadResponse = (boolean) context.getData(HttpUtils.AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(HttpUtils.AZURE_IGNORE_RESPONSE_BODY).orElse(false);
Duration perCallTimeout = (Duration) context.getData(HttpUtils.AZURE_RESPONSE_TIMEOUT)
.filter(timeoutDuration -> timeoutDuration instanceof Duration)
.orElse(responseTimeout);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
RequestOptions options = new RequestOptions().setMethod(HttpMethod.valueOf(request.getHttpMethod().name()))
.setAbsoluteURI(request.getUrl());
Promise<HttpResponse> promise = Promise.promise();
client.request(options, requestResult -> {
if (requestResult.failed()) {
promise.fail(requestResult.cause());
return;
}
HttpClientRequest vertxRequest = requestResult.result();
for (HttpHeader header : request.getHeaders()) {
vertxRequest.putHeader(header.getName(), header.getValuesList());
}
if (request.getHeaders().get(HttpHeaderName.CONTENT_LENGTH) == null) {
vertxRequest.setChunked(true);
}
Future<HttpClientResponse> responseFuture;
if (!perCallTimeout.isZero() && !perCallTimeout.isNegative()) {
responseFuture = vertxRequest.response().timeout(perCallTimeout.toMillis(), TimeUnit.MILLISECONDS);
} else {
responseFuture = vertxRequest.response();
}
responseFuture = responseFuture.onFailure(promise::fail);
if (eagerlyReadResponse || ignoreResponseBody) {
responseFuture.andThen(responseResult -> {
if (responseResult.failed()) {
promise.fail(responseResult.cause());
return;
}
HttpClientResponse vertxHttpResponse = responseResult.result();
vertxHttpResponse.body().andThen(bodyResult -> {
if (bodyResult.succeeded()) {
promise.complete(
new BufferedVertxHttpResponse(request, vertxHttpResponse, bodyResult.result()));
} else {
promise.fail(bodyResult.cause());
}
});
});
} else {
responseFuture.andThen(responseResult -> {
if (responseResult.succeeded()) {
promise.complete(new VertxHttpAsyncResponse(request, responseResult.result()));
} else {
promise.fail(responseResult.cause());
}
});
}
sendBody(contextView, request, progressReporter, vertxRequest, promise);
});
return promise.future().toCompletionStage().toCompletableFuture();
}
@SuppressWarnings("deprecation")
private void sendBody(ContextView contextView, HttpRequest azureRequest, ProgressReporter progressReporter,
HttpClientRequest vertxRequest, Promise<HttpResponse> promise) {
BinaryData body = azureRequest.getBodyAsBinaryData();
if (body == null) {
vertxRequest.send().onFailure(promise::fail);
} else {
BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
if (bodyContent instanceof ByteArrayContent
|| bodyContent instanceof ByteBufferContent
|| bodyContent instanceof StringContent
|| bodyContent instanceof SerializableContent) {
long contentLength = bodyContent.getLength();
vertxRequest.send(Buffer.buffer(Unpooled.wrappedBuffer(bodyContent.toByteBuffer())))
.onSuccess(ignored -> reportProgress(contentLength, progressReporter))
.onFailure(promise::fail);
} else {
azureRequest.getBody()
.subscribe(new VertxRequestWriteSubscriber(vertxRequest, promise, progressReporter, contextView));
}
}
}
private static void reportProgress(long progress, ProgressReporter progressReporter) {
if (progressReporter != null) {
progressReporter.reportProgress(progress);
}
}
} |
nit: HttpUtils can provide helper functions to reduce repetitive code | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(HttpUtils.AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(HttpUtils.AZURE_IGNORE_RESPONSE_BODY).orElse(false);
boolean eagerlyConvertHeaders
= (boolean) context.getData(HttpUtils.AZURE_EAGERLY_CONVERT_HEADERS).orElse(false);
Duration perCallTimeout = (Duration) context.getData(HttpUtils.AZURE_RESPONSE_TIMEOUT)
.filter(timeoutDuration -> timeoutDuration instanceof Duration)
.orElse(responseTimeout);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
return Mono.create(sink -> sink.onRequest(value -> {
Mono.fromCallable(() -> toOkHttpRequest(request, progressReporter, perCallTimeout))
.subscribe(okHttpRequest -> {
try {
Call call = httpClient.newCall(okHttpRequest);
call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders));
sink.onCancel(call::cancel);
} catch (Exception ex) {
sink.error(ex);
}
}, sink::error);
}));
} | boolean eagerlyReadResponse = (boolean) context.getData(HttpUtils.AZURE_EAGERLY_READ_RESPONSE).orElse(false); | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(HttpUtils.AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(HttpUtils.AZURE_IGNORE_RESPONSE_BODY).orElse(false);
boolean eagerlyConvertHeaders
= (boolean) context.getData(HttpUtils.AZURE_EAGERLY_CONVERT_HEADERS).orElse(false);
Duration perCallTimeout = (Duration) context.getData(HttpUtils.AZURE_RESPONSE_TIMEOUT)
.filter(timeoutDuration -> timeoutDuration instanceof Duration)
.orElse(responseTimeout);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
return Mono.create(sink -> sink.onRequest(value -> {
Mono.fromCallable(() -> toOkHttpRequest(request, progressReporter, perCallTimeout))
.subscribe(okHttpRequest -> {
try {
Call call = httpClient.newCall(okHttpRequest);
call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders));
sink.onCancel(call::cancel);
} catch (Exception ex) {
sink.error(ex);
}
}, sink::error);
}));
} | class OkHttpAsyncHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpAsyncHttpClient.class);
private static final byte[] EMPTY_BODY = new byte[0];
private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY);
final OkHttpClient httpClient;
private final Duration responseTimeout;
OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration responseTimeout) {
EventListener.Factory factory = httpClient.eventListenerFactory();
this.httpClient
= httpClient.newBuilder().eventListenerFactory(new ResponseTimeoutListenerFactory(factory)).build();
this.responseTimeout = responseTimeout;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(HttpUtils.AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(HttpUtils.AZURE_IGNORE_RESPONSE_BODY).orElse(false);
boolean eagerlyConvertHeaders
= (boolean) context.getData(HttpUtils.AZURE_EAGERLY_CONVERT_HEADERS).orElse(false);
Duration perCallTimeout = (Duration) context.getData(HttpUtils.AZURE_RESPONSE_TIMEOUT)
.filter(timeoutDuration -> timeoutDuration instanceof Duration)
.orElse(responseTimeout);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
Request okHttpRequest = toOkHttpRequest(request, progressReporter, perCallTimeout);
Call call = null;
try {
call = httpClient.newCall(okHttpRequest);
Response okHttpResponse = call.execute();
return toHttpResponse(request, okHttpResponse, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(mapIOException(e, call)));
}
}
/**
* Current design for response timeout uses call cancellation which throws an IOException with message "canceled".
* This isn't what we want, we want an InterruptedIOException with message "timeout". Use information stored on the
* call to determine if the IOException should be mapped to an InterruptedIOException.
*
* @param e the IOException to map
* @param call the Call to associate with the new IOException
* @return the new IOException
*/
private static IOException mapIOException(IOException e, Call call) {
if (call == null) {
return e;
}
PerCallTimeoutCall perCallTimeoutCall = call.request().tag(PerCallTimeoutCall.class);
if (perCallTimeoutCall != null && perCallTimeoutCall.isTimedOut()) {
InterruptedIOException i = new InterruptedIOException("timedout");
i.addSuppressed(e);
return i;
}
return e;
}
/**
* Converts the given azure-core request to okhttp request.
*
* @param request the azure-core request
* @param progressReporter the {@link ProgressReporter}. Can be null.
* @param perCallTimeout the per call timeout
* @return the okhttp request
*/
private okhttp3.Request toOkHttpRequest(HttpRequest request, ProgressReporter progressReporter,
Duration perCallTimeout) {
Request.Builder requestBuilder = new Request.Builder().url(request.getUrl());
if (perCallTimeout != null) {
requestBuilder.tag(PerCallTimeoutCall.class, new PerCallTimeoutCall(perCallTimeout.toMillis()));
}
if (request.getHeaders() != null) {
for (HttpHeader hdr : request.getHeaders()) {
hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value));
}
}
if (request.getHttpMethod() == HttpMethod.GET) {
return requestBuilder.get().build();
} else if (request.getHttpMethod() == HttpMethod.HEAD) {
return requestBuilder.head().build();
}
RequestBody okHttpRequestBody = toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders());
if (progressReporter != null) {
okHttpRequestBody = new OkHttpProgressReportingRequestBody(okHttpRequestBody, progressReporter);
}
return requestBuilder.method(request.getHttpMethod().toString(), okHttpRequestBody).build();
}
/**
* Create a Mono of okhttp3.RequestBody from the given BinaryData.
*
* @param bodyContent The request body content
* @param headers the headers associated with the original request
* @return the Mono emitting okhttp request
*/
private RequestBody toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
if (bodyContent == null) {
return EMPTY_REQUEST_BODY;
}
String contentType = headers.getValue(HttpHeaderName.CONTENT_TYPE);
MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
long effectiveContentLength = getRequestContentLength(content, headers);
if (content instanceof FluxByteBufferContent) {
return new OkHttpFluxRequestBody(content, effectiveContentLength, mediaType,
httpClient.callTimeoutMillis());
} else {
return new BinaryDataRequestBody(bodyContent, mediaType, effectiveContentLength);
}
}
private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) {
Long contentLength = content.getLength();
if (contentLength == null) {
String contentLengthHeaderValue = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLengthHeaderValue != null) {
contentLength = Long.parseLong(contentLengthHeaderValue);
} else {
contentLength = -1L;
}
}
return contentLength;
}
private static HttpResponse toHttpResponse(HttpRequest request, okhttp3.Response response,
boolean eagerlyReadResponse, boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
/*
* Use a buffered response when we are eagerly reading the response from the network and the body isn't
* empty.
*/
if (eagerlyReadResponse || ignoreResponseBody) {
try (ResponseBody body = response.body()) {
byte[] bytes = (body != null) ? body.bytes() : EMPTY_BODY;
return new OkHttpAsyncBufferedResponse(response, request, bytes, eagerlyConvertHeaders);
}
} else {
return new OkHttpAsyncResponse(response, request, eagerlyConvertHeaders);
}
}
    /**
     * OkHttp {@link okhttp3.Callback} that bridges an async OkHttp call to a Reactor {@link MonoSink}:
     * success maps the OkHttp response to an azure-core {@link HttpResponse}, failure surfaces the
     * {@link IOException} (or its single suppressed cause) through the sink.
     */
    private static class OkHttpCallback implements okhttp3.Callback {
        // Terminal sink for the Mono handed back to the caller.
        private final MonoSink<HttpResponse> sink;
        // Original azure-core request, kept so the response can be mapped back to it.
        private final HttpRequest request;
        private final boolean eagerlyReadResponse;
        private final boolean ignoreResponseBody;
        private final boolean eagerlyConvertHeaders;
        OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse,
            boolean ignoreResponseBody, boolean eagerlyConvertHeaders) {
            this.sink = sink;
            this.request = request;
            this.eagerlyReadResponse = eagerlyReadResponse;
            this.ignoreResponseBody = ignoreResponseBody;
            this.eagerlyConvertHeaders = eagerlyConvertHeaders;
        }
        @SuppressWarnings("NullableProblems")
        @Override
        public void onFailure(okhttp3.Call call, IOException e) {
            // When exactly one suppressed throwable exists, surface it instead of the wrapper —
            // NOTE(review): presumably to unwrap the real cause OkHttp attached; confirm against OkHttp behavior.
            if (e.getSuppressed().length == 1) {
                Throwable suppressed = e.getSuppressed()[0];
                if (suppressed instanceof IOException) {
                    sink.error(mapIOException((IOException) suppressed, call));
                } else {
                    sink.error(suppressed);
                }
            } else {
                sink.error(mapIOException(e, call));
            }
        }
        @SuppressWarnings("NullableProblems")
        @Override
        public void onResponse(okhttp3.Call call, okhttp3.Response response) {
            try {
                sink.success(
                    toHttpResponse(request, response, eagerlyReadResponse, ignoreResponseBody, eagerlyConvertHeaders));
            } catch (IOException ex) {
                // Buffering the body failed; propagate as the Mono's error signal.
                sink.error(ex);
            }
        }
    }
}
class OkHttpAsyncHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpAsyncHttpClient.class);
private static final byte[] EMPTY_BODY = new byte[0];
private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY);
final OkHttpClient httpClient;
private final Duration responseTimeout;
    /**
     * Creates an OkHttp-backed azure-core HttpClient.
     *
     * @param httpClient the underlying OkHttp client; rebuilt here so the per-call response timeout
     * listener factory wraps any user-supplied event listener factory
     * @param responseTimeout the default response timeout applied when a request supplies none
     */
    OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration responseTimeout) {
        EventListener.Factory factory = httpClient.eventListenerFactory();
        // Rebuild the client so every call is observed by ResponseTimeoutListenerFactory (which wraps the original).
        this.httpClient
            = httpClient.newBuilder().eventListenerFactory(new ResponseTimeoutListenerFactory(factory)).build();
        this.responseTimeout = responseTimeout;
    }
    /**
     * Sends the request asynchronously with no additional context.
     * Delegates to the {@code send(HttpRequest, Context)} overload (declared elsewhere in this class)
     * using {@link Context#NONE}.
     *
     * @param request the request to send
     * @return a {@link Mono} emitting the response
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }
@Override
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(HttpUtils.AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(HttpUtils.AZURE_IGNORE_RESPONSE_BODY).orElse(false);
boolean eagerlyConvertHeaders
= (boolean) context.getData(HttpUtils.AZURE_EAGERLY_CONVERT_HEADERS).orElse(false);
Duration perCallTimeout = (Duration) context.getData(HttpUtils.AZURE_RESPONSE_TIMEOUT)
.filter(timeoutDuration -> timeoutDuration instanceof Duration)
.orElse(responseTimeout);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
Request okHttpRequest = toOkHttpRequest(request, progressReporter, perCallTimeout);
Call call = null;
try {
call = httpClient.newCall(okHttpRequest);
Response okHttpResponse = call.execute();
return toHttpResponse(request, okHttpResponse, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(mapIOException(e, call)));
}
}
/**
* Current design for response timeout uses call cancellation which throws an IOException with message "canceled".
* This isn't what we want, we want an InterruptedIOException with message "timeout". Use information stored on the
* call to determine if the IOException should be mapped to an InterruptedIOException.
*
* @param e the IOException to map
* @param call the Call to associate with the new IOException
* @return the new IOException
*/
private static IOException mapIOException(IOException e, Call call) {
if (call == null) {
return e;
}
PerCallTimeoutCall perCallTimeoutCall = call.request().tag(PerCallTimeoutCall.class);
if (perCallTimeoutCall != null && perCallTimeoutCall.isTimedOut()) {
InterruptedIOException i = new InterruptedIOException("timedout");
i.addSuppressed(e);
return i;
}
return e;
}
/**
* Converts the given azure-core request to okhttp request.
*
* @param request the azure-core request
* @param progressReporter the {@link ProgressReporter}. Can be null.
* @param perCallTimeout the per call timeout
* @return the okhttp request
*/
private okhttp3.Request toOkHttpRequest(HttpRequest request, ProgressReporter progressReporter,
Duration perCallTimeout) {
Request.Builder requestBuilder = new Request.Builder().url(request.getUrl());
if (perCallTimeout != null) {
requestBuilder.tag(PerCallTimeoutCall.class, new PerCallTimeoutCall(perCallTimeout.toMillis()));
}
if (request.getHeaders() != null) {
for (HttpHeader hdr : request.getHeaders()) {
hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value));
}
}
if (request.getHttpMethod() == HttpMethod.GET) {
return requestBuilder.get().build();
} else if (request.getHttpMethod() == HttpMethod.HEAD) {
return requestBuilder.head().build();
}
RequestBody okHttpRequestBody = toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders());
if (progressReporter != null) {
okHttpRequestBody = new OkHttpProgressReportingRequestBody(okHttpRequestBody, progressReporter);
}
return requestBuilder.method(request.getHttpMethod().toString(), okHttpRequestBody).build();
}
    /**
     * Create an okhttp3.RequestBody from the given BinaryData.
     *
     * @param bodyContent The request body content; {@code null} yields a shared empty body
     * @param headers the headers associated with the original request (Content-Type / Content-Length)
     * @return the okhttp request body
     */
    private RequestBody toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
        if (bodyContent == null) {
            return EMPTY_REQUEST_BODY;
        }
        String contentType = headers.getValue(HttpHeaderName.CONTENT_TYPE);
        MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
        BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
        long effectiveContentLength = getRequestContentLength(content, headers);
        if (content instanceof FluxByteBufferContent) {
            // Reactive content streams through a Flux-backed body bounded by the client's call timeout.
            return new OkHttpFluxRequestBody(content, effectiveContentLength, mediaType,
                httpClient.callTimeoutMillis());
        } else {
            return new BinaryDataRequestBody(bodyContent, mediaType, effectiveContentLength);
        }
    }
    /**
     * Resolves the request content length: the content's intrinsic length when known, otherwise the
     * Content-Length header value, otherwise -1 (unknown).
     *
     * @param content the request body content
     * @param headers the request headers
     * @return the effective content length, or -1 when it cannot be determined
     */
    private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) {
        Long contentLength = content.getLength();
        if (contentLength == null) {
            String contentLengthHeaderValue = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
            if (contentLengthHeaderValue != null) {
                // NOTE(review): Long.parseLong throws NumberFormatException on a malformed header — confirm intended.
                contentLength = Long.parseLong(contentLengthHeaderValue);
            } else {
                contentLength = -1L;
            }
        }
        return contentLength;
    }
private static HttpResponse toHttpResponse(HttpRequest request, okhttp3.Response response,
boolean eagerlyReadResponse, boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
/*
* Use a buffered response when we are eagerly reading the response from the network and the body isn't
* empty.
*/
if (eagerlyReadResponse || ignoreResponseBody) {
try (ResponseBody body = response.body()) {
byte[] bytes = (body != null) ? body.bytes() : EMPTY_BODY;
return new OkHttpAsyncBufferedResponse(response, request, bytes, eagerlyConvertHeaders);
}
} else {
return new OkHttpAsyncResponse(response, request, eagerlyConvertHeaders);
}
}
    /**
     * Bridges an async OkHttp call to a Reactor {@link MonoSink}: success maps the OkHttp response to
     * an azure-core {@link HttpResponse}; failure surfaces the {@link IOException} (or its single
     * suppressed cause) through the sink.
     */
    private static class OkHttpCallback implements okhttp3.Callback {
        // Terminal sink for the Mono handed back to the caller.
        private final MonoSink<HttpResponse> sink;
        // Original azure-core request, retained for response mapping.
        private final HttpRequest request;
        private final boolean eagerlyReadResponse;
        private final boolean ignoreResponseBody;
        private final boolean eagerlyConvertHeaders;
        OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse,
            boolean ignoreResponseBody, boolean eagerlyConvertHeaders) {
            this.sink = sink;
            this.request = request;
            this.eagerlyReadResponse = eagerlyReadResponse;
            this.ignoreResponseBody = ignoreResponseBody;
            this.eagerlyConvertHeaders = eagerlyConvertHeaders;
        }
        @SuppressWarnings("NullableProblems")
        @Override
        public void onFailure(okhttp3.Call call, IOException e) {
            // A single suppressed throwable is surfaced instead of the wrapper exception —
            // NOTE(review): presumably unwrapping the real cause OkHttp attached; confirm.
            if (e.getSuppressed().length == 1) {
                Throwable suppressed = e.getSuppressed()[0];
                if (suppressed instanceof IOException) {
                    sink.error(mapIOException((IOException) suppressed, call));
                } else {
                    sink.error(suppressed);
                }
            } else {
                sink.error(mapIOException(e, call));
            }
        }
        @SuppressWarnings("NullableProblems")
        @Override
        public void onResponse(okhttp3.Call call, okhttp3.Response response) {
            try {
                sink.success(
                    toHttpResponse(request, response, eagerlyReadResponse, ignoreResponseBody, eagerlyConvertHeaders));
            } catch (IOException ex) {
                // Buffering the body failed; propagate as the Mono's error signal.
                sink.error(ex);
            }
        }
    }
} |
nit: use the `List` interface ```suggestion List<InputTextItem> content = new ArrayList<>(); ``` | public Mono<List<TranslatedTextItem>> translate(List<String> texts, TranslateOptions translateOptions) {
ArrayList<InputTextItem> content = new ArrayList<>();
for (String text : texts) {
content.add(new InputTextItem(text));
}
return translate(translateOptions.getTargetLanguages(), content, translateOptions.getClientTraceId(),
translateOptions.getSourceLanguage(), translateOptions.getTextType(), translateOptions.getCategory(),
translateOptions.getProfanityAction(), translateOptions.getProfanityMarker(),
translateOptions.isIncludeAlignment(), translateOptions.isIncludeSentenceLength(),
translateOptions.getSuggestedSourceLanguage(), translateOptions.getSourceLanguageScript(),
translateOptions.getTargetLanguageScript(), translateOptions.isAllowFallback());
} | ArrayList<InputTextItem> content = new ArrayList<>(); | new InputTextItem(text));
}
return translate(translateOptions.getTargetLanguages(), content, translateOptions.getClientTraceId(),
translateOptions.getSourceLanguage(), translateOptions.getTextType(), translateOptions.getCategory(),
translateOptions.getProfanityAction(), translateOptions.getProfanityMarker(),
translateOptions.isIncludeAlignment(), translateOptions.isIncludeSentenceLength(),
translateOptions.getSuggestedSourceLanguage(), translateOptions.getSourceLanguageScript(),
translateOptions.getTargetLanguageScript(), translateOptions.isAllowFallback());
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return the response body on successful completion of {@link Mono}.
 */
class TextTranslationAsyncClient {
@Generated
private final TextTranslationClientImpl serviceClient;
/**
* Initializes an instance of TextTranslationAsyncClient class.
*
* @param serviceClient the service client implementation.
*/
    @Generated
    TextTranslationAsyncClient(TextTranslationClientImpl serviceClient) {
        // Store the generated implementation client that performs all service calls.
        this.serviceClient = serviceClient;
    }
/**
* Translate Text.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>from</td><td>String</td><td>No</td><td>Specifies the language of the input text. Find which languages are
* available to translate from by
* looking up supported languages using the translation scope. If the from parameter isn't specified,
* automatic language detection is applied to determine the source language.
*
* You must use the from parameter rather than autodetection when using the dynamic dictionary feature.
* Note: the dynamic dictionary feature is case-sensitive.</td></tr>
* <tr><td>textType</td><td>String</td><td>No</td><td>Defines whether the text being translated is plain text or
* HTML text. Any HTML needs to be a well-formed,
* complete element. Possible values are: plain (default) or html. Allowed values: "Plain", "Html".</td></tr>
* <tr><td>category</td><td>String</td><td>No</td><td>A string specifying the category (domain) of the translation.
* This parameter is used to get translations
* from a customized system built with Custom Translator. Add the Category ID from your Custom Translator
* project details to this parameter to use your deployed customized system. Default value is: general.</td></tr>
* <tr><td>profanityAction</td><td>String</td><td>No</td><td>Specifies how profanities should be treated in
* translations.
* Possible values are: NoAction (default), Marked or Deleted. Allowed values: "NoAction", "Marked",
* "Deleted".</td></tr>
* <tr><td>profanityMarker</td><td>String</td><td>No</td><td>Specifies how profanities should be marked in
* translations.
* Possible values are: Asterisk (default) or Tag. . Allowed values: "Asterisk", "Tag".</td></tr>
* <tr><td>includeAlignment</td><td>Boolean</td><td>No</td><td>Specifies whether to include alignment projection
* from source text to translated text.
* Possible values are: true or false (default).</td></tr>
* <tr><td>includeSentenceLength</td><td>Boolean</td><td>No</td><td>Specifies whether to include sentence boundaries
* for the input text and the translated text.
* Possible values are: true or false (default).</td></tr>
* <tr><td>suggestedFrom</td><td>String</td><td>No</td><td>Specifies a fallback language if the language of the
* input text can't be identified.
* Language autodetection is applied when the from parameter is omitted. If detection fails,
* the suggestedFrom language will be assumed.</td></tr>
* <tr><td>fromScript</td><td>String</td><td>No</td><td>Specifies the script of the input text.</td></tr>
* <tr><td>toScript</td><td>String</td><td>No</td><td>Specifies the script of the translated text.</td></tr>
* <tr><td>allowFallback</td><td>Boolean</td><td>No</td><td>Specifies that the service is allowed to fall back to a
* general system when a custom system doesn't exist.
* Possible values are: true (default) or false.
*
* allowFallback=false specifies that the translation should only use systems trained for the category specified
* by the request. If a translation for language X to language Y requires chaining through a pivot language E,
* then all the systems in the chain (X → E and E → Y) will need to be custom and have the same category.
* If no system is found with the specific category, the request will return a 400 status code. allowFallback=true
* specifies that the service is allowed to fall back to a general system when a custom system doesn't
* exist.</td></tr>
* </table>
     * You can add these to a request with {@link RequestOptions#addQueryParam}.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
     * You can add these to a request with {@link RequestOptions#addHeader}.
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* detectedLanguage (Optional): {
* language: String (Required)
* score: double (Required)
* }
* translations (Required): [
* (Required){
* to: String (Required)
* text: String (Required)
* transliteration (Optional): {
* text: String (Required)
* script: String (Required)
* }
* alignment (Optional): {
* proj: String (Required)
* }
* sentLen (Optional): {
* srcSentLen (Required): [
* int (Required)
* ]
* transSentLen (Required): [
* int (Required)
* ]
* }
* }
* ]
* sourceText (Optional): {
* text: String (Required)
* }
* }
* ]
* }</pre>
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> translateWithResponse(List<String> targetLanguages, BinaryData requestBody,
        RequestOptions requestOptions) {
        // Generated convenience wrapper: delegates straight to the implementation client's async call.
        return this.serviceClient.translateWithResponseAsync(targetLanguages, requestBody, requestOptions);
    }
/**
* Transliterate Text.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* script: String (Required)
* }
* ]
* }</pre>
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> transliterateWithResponse(String language, String sourceLanguageScript,
        String targetLanguageScript, BinaryData requestBody, RequestOptions requestOptions) {
        // Generated wrapper: forwards all parameters to the implementation client.
        return this.serviceClient.transliterateWithResponseAsync(language, sourceLanguageScript, targetLanguageScript,
            requestBody, requestOptions);
    }
/**
* Find Sentence Boundaries.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>language</td><td>String</td><td>No</td><td>Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.</td></tr>
* <tr><td>script</td><td>String</td><td>No</td><td>Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* detectedLanguage (Optional): {
* language: String (Required)
* score: double (Required)
* }
* sentLen (Required): [
* int (Required)
* ]
* }
* ]
* }</pre>
*
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> findSentenceBoundariesWithResponse(BinaryData requestBody,
        RequestOptions requestOptions) {
        // Generated wrapper: delegates to the implementation client's async call.
        return this.serviceClient.findSentenceBoundariesWithResponseAsync(requestBody, requestOptions);
    }
/**
* Lookup Dictionary Entries.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* normalizedSource: String (Required)
* displaySource: String (Required)
* translations (Required): [
* (Required){
* normalizedTarget: String (Required)
* displayTarget: String (Required)
* posTag: String (Required)
* confidence: double (Required)
* prefixWord: String (Required)
* backTranslations (Required): [
* (Required){
* normalizedText: String (Required)
* displayText: String (Required)
* numExamples: int (Required)
* frequencyCount: int (Required)
* }
* ]
* }
* ]
* }
* ]
* }</pre>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> lookupDictionaryEntriesWithResponse(String sourceLanguage, String targetLanguage,
        BinaryData requestBody, RequestOptions requestOptions) {
        // Generated wrapper: delegates to the implementation client's async call.
        return this.serviceClient.lookupDictionaryEntriesWithResponseAsync(sourceLanguage, targetLanguage, requestBody,
            requestOptions);
    }
/**
* Lookup Dictionary Examples.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* translation: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* normalizedSource: String (Required)
* normalizedTarget: String (Required)
* examples (Required): [
* (Required){
* sourcePrefix: String (Required)
* sourceTerm: String (Required)
* sourceSuffix: String (Required)
* targetPrefix: String (Required)
* targetTerm: String (Required)
* targetSuffix: String (Required)
* }
* ]
* }
* ]
* }</pre>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> lookupDictionaryExamplesWithResponse(String sourceLanguage, String targetLanguage,
        BinaryData requestBody, RequestOptions requestOptions) {
        // Generated wrapper: delegates to the implementation client's async call.
        return this.serviceClient.lookupDictionaryExamplesWithResponseAsync(sourceLanguage, targetLanguage, requestBody,
            requestOptions);
    }
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param sourceLanguage Specifies the language of the input text. Find which languages are available to translate
* from by
* looking up supported languages using the translation scope. If the from parameter isn't specified,
* automatic language detection is applied to determine the source language.
*
* You must use the from parameter rather than autodetection when using the dynamic dictionary feature.
* Note: the dynamic dictionary feature is case-sensitive.
* @param textType Defines whether the text being translated is plain text or HTML text. Any HTML needs to be a
* well-formed,
* complete element. Possible values are: plain (default) or html.
* @param category A string specifying the category (domain) of the translation. This parameter is used to get
* translations
* from a customized system built with Custom Translator. Add the Category ID from your Custom Translator
* project details to this parameter to use your deployed customized system. Default value is: general.
* @param profanityAction Specifies how profanities should be treated in translations.
* Possible values are: NoAction (default), Marked or Deleted.
* @param profanityMarker Specifies how profanities should be marked in translations.
* Possible values are: Asterisk (default) or Tag.
* @param includeAlignment Specifies whether to include alignment projection from source text to translated text.
* Possible values are: true or false (default).
* @param includeSentenceLength Specifies whether to include sentence boundaries for the input text and the
* translated text.
* Possible values are: true or false (default).
* @param suggestedSourceLanguage Specifies a fallback language if the language of the input text can't be
* identified.
* Language autodetection is applied when the from parameter is omitted. If detection fails,
* the suggestedFrom language will be assumed.
* @param sourceLanguageScript Specifies the script of the input text.
* @param targetLanguageScript Specifies the script of the translated text.
* @param allowFallback Specifies that the service is allowed to fall back to a general system when a custom system
* doesn't exist.
* Possible values are: true (default) or false.
*
* allowFallback=false specifies that the translation should only use systems trained for the category specified
* by the request. If a translation for language X to language Y requires chaining through a pivot language E,
* then all the systems in the chain (X → E and E → Y) will need to be custom and have the same category.
* If no system is found with the specific category, the request will return a 400 status code. allowFallback=true
* specifies that the service is allowed to fall back to a general system when a custom system doesn't exist.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<TranslatedTextItem>> translate(List<String> targetLanguages, List<InputTextItem> requestBody,
String clientTraceId, String sourceLanguage, TextType textType, String category,
ProfanityAction profanityAction, ProfanityMarker profanityMarker, Boolean includeAlignment,
Boolean includeSentenceLength, String suggestedSourceLanguage, String sourceLanguageScript,
String targetLanguageScript, Boolean allowFallback) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
if (sourceLanguage != null) {
requestOptions.addQueryParam("from", sourceLanguage, false);
}
if (textType != null) {
requestOptions.addQueryParam("textType", textType.toString(), false);
}
if (category != null) {
requestOptions.addQueryParam("category", category, false);
}
if (profanityAction != null) {
requestOptions.addQueryParam("profanityAction", profanityAction.toString(), false);
}
if (profanityMarker != null) {
requestOptions.addQueryParam("profanityMarker", profanityMarker.toString(), false);
}
if (includeAlignment != null) {
requestOptions.addQueryParam("includeAlignment", String.valueOf(includeAlignment), false);
}
if (includeSentenceLength != null) {
requestOptions.addQueryParam("includeSentenceLength", String.valueOf(includeSentenceLength), false);
}
if (suggestedSourceLanguage != null) {
requestOptions.addQueryParam("suggestedFrom", suggestedSourceLanguage, false);
}
if (sourceLanguageScript != null) {
requestOptions.addQueryParam("fromScript", sourceLanguageScript, false);
}
if (targetLanguageScript != null) {
requestOptions.addQueryParam("toScript", targetLanguageScript, false);
}
if (allowFallback != null) {
requestOptions.addQueryParam("allowFallback", String.valueOf(allowFallback), false);
}
return translateWithResponse(targetLanguages, BinaryData.fromObject(requestBody), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM));
}
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<TranslatedTextItem>> translateInner(List<String> targetLanguages,
List<InputTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return translateWithResponse(targetLanguages, BinaryData.fromObject(requestBody), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM));
}
/**
* Translate Text.
*
* @param targetLanguage Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(String targetLanguage, List<String> texts) {
return translateInner(Arrays.asList(targetLanguage), convertTextToData(texts));
}
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(String targetLanguages, String text) {
return translate(targetLanguages, Arrays.asList(text));
}
/**
* Translate Text.
*
* @param text Text to translate.
* @param translateOptions Translate Options.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(String text, TranslateOptions translateOptions) {
return translate(Arrays.asList(text), translateOptions);
}
/**
* Translate Text.
*
* @param texts List of text to translate.
* @param translateOptions Translate Options.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(List<String> texts, TranslateOptions translateOptions) {
ArrayList<InputTextItem> content = new ArrayList<>();
for (String text : texts) {
content.add(.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<TransliteratedText>> transliterateInner(String language, String sourceLanguageScript,
String targetLanguageScript, List<InputTextItem> requestBody, String clientTraceId) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
return transliterateWithResponse(language, sourceLanguageScript, targetLanguageScript,
BinaryData.fromObject(requestBody), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT));
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, List<String> requestBody, String clientTraceId) {
return transliterateInner(language, sourceLanguageScript, targetLanguageScript, convertTextToData(requestBody),
clientTraceId);
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param text Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, String text, String clientTraceId) {
return transliterate(language, sourceLanguageScript, targetLanguageScript, Arrays.asList(text), clientTraceId);
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterateInner(String language, String sourceLanguageScript,
String targetLanguageScript, List<InputTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return transliterateWithResponse(language, sourceLanguageScript, targetLanguageScript,
BinaryData.fromObject(requestBody), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT));
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, List<String> requestBody) {
return transliterateInner(language, sourceLanguageScript, targetLanguageScript, convertTextToData(requestBody));
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, String text) {
return transliterate(language, sourceLanguageScript, targetLanguageScript, Arrays.asList(text));
}
/**
* Find Sentence Boundaries.
*
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<BreakSentenceItem>> findSentenceBoundariesInner(List<InputTextItem> requestBody,
String clientTraceId, String language, String script) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
if (language != null) {
requestOptions.addQueryParam("language", language, false);
}
if (script != null) {
requestOptions.addQueryParam("script", script, false);
}
return findSentenceBoundariesWithResponse(BinaryData.fromObject(requestBody), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM));
}
/**
* Find Sentence Boundaries.
*
* @param texts Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(List<String> texts, String clientTraceId,
String language, String script) {
return findSentenceBoundariesInner(convertTextToData(texts), clientTraceId, language, script);
}
/**
* Find Sentence Boundaries.
*
* @param text Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(String text, String clientTraceId, String language,
String script) {
return findSentenceBoundaries(Arrays.asList(text), clientTraceId, language, script);
}
/**
* Find Sentence Boundaries.
*
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<BreakSentenceItem>> findSentenceBoundariesInner(List<InputTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return findSentenceBoundariesWithResponse(BinaryData.fromObject(requestBody), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM));
}
/**
* Find Sentence Boundaries.
*
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(List<String> texts) {
return findSentenceBoundariesInner(convertTextToData(texts));
}
/**
* Find Sentence Boundaries.
*
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(String text) {
return findSentenceBoundaries(Arrays.asList(text));
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<DictionaryLookupItem>> lookupDictionaryEntriesInner(String sourceLanguage, String targetLanguage,
List<InputTextItem> requestBody, String clientTraceId) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
return lookupDictionaryEntriesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(requestBody),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM));
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param texts Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
List<String> texts, String clientTraceId) {
return lookupDictionaryEntriesInner(sourceLanguage, targetLanguage, convertTextToData(texts), clientTraceId);
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param text Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
String text, String clientTraceId) {
return lookupDictionaryEntries(sourceLanguage, targetLanguage, Arrays.asList(text), clientTraceId);
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<DictionaryLookupItem>> lookupDictionaryEntriesInner(String sourceLanguage, String targetLanguage,
List<InputTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return lookupDictionaryEntriesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(requestBody),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM));
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
List<String> texts) {
return lookupDictionaryEntriesInner(sourceLanguage, targetLanguage, convertTextToData(texts));
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
String text) {
return lookupDictionaryEntries(sourceLanguage, targetLanguage, Arrays.asList(text));
}
/**
* Lookup Dictionary Examples.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
    // NOTE(review): generated (@Generated) convenience method - prefer regenerating from the spec
    // over hand edits.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<List<DictionaryExampleItem>> lookupDictionaryExamples(String sourceLanguage, String targetLanguage,
        List<DictionaryExampleTextItem> requestBody, String clientTraceId) {
        RequestOptions requestOptions = new RequestOptions();
        // The X-ClientTraceId header is only attached when the caller supplied a trace id.
        if (clientTraceId != null) {
            requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
        }
        return lookupDictionaryExamplesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(requestBody),
            requestOptions).flatMap(FluxUtil::toMono)
            .map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM));
    }
/**
* Lookup Dictionary Examples.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
    // NOTE(review): generated (@Generated) convenience method - prefer regenerating from the spec
    // over hand edits.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<List<DictionaryExampleItem>> lookupDictionaryExamples(String sourceLanguage, String targetLanguage,
        List<DictionaryExampleTextItem> requestBody) {
        RequestOptions requestOptions = new RequestOptions();
        return lookupDictionaryExamplesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(requestBody),
            requestOptions).flatMap(FluxUtil::toMono)
            .map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM));
    }
    // Cached TypeReference instances used to deserialize JSON array responses into typed lists;
    // caching avoids allocating a new anonymous-subclass capture on every service call.
    @Generated
    private static final TypeReference<List<TransliteratedText>> TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT
        = new TypeReference<List<TransliteratedText>>() {
        };
    @Generated
    private static final TypeReference<List<BreakSentenceItem>> TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM
        = new TypeReference<List<BreakSentenceItem>>() {
        };
    @Generated
    private static final TypeReference<List<DictionaryLookupItem>> TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM
        = new TypeReference<List<DictionaryLookupItem>>() {
        };
    @Generated
    private static final TypeReference<List<DictionaryExampleItem>> TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM
        = new TypeReference<List<DictionaryExampleItem>>() {
        };
    @Generated
    private static final TypeReference<List<TranslatedTextItem>> TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM
        = new TypeReference<List<TranslatedTextItem>>() {
        };
/**
* Gets the set of languages currently supported by other operations of the Translator.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>scope</td><td>String</td><td>No</td><td>A comma-separated list of names defining the group of languages
* to return.
* Allowed group names are: `translation`, `transliteration` and `dictionary`.
* If no scope is given, then all groups are returned, which is equivalent to passing
* `scope=translation,transliteration,dictionary`. To decide which set of supported languages
 * is appropriate for your scenario, see the description of the response object.</td></tr>
 * </table>
 * You can add these to a request with {@link RequestOptions}.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* <tr><td>Accept-Language</td><td>String</td><td>No</td><td>The language to use for user interface strings. Some of
* the fields in the response are names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.</td></tr>
* <tr><td>If-None-Match</td><td>String</td><td>No</td><td>Passing the value of the ETag response header in an
* If-None-Match field will allow the service to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response
* body.</td></tr>
* </table>
 * You can add these to a request with {@link RequestOptions}.
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* {
* translation (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* }
* }
* transliteration (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* scripts (Required): [
* (Required){
* code: String (Required)
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* toScripts (Required): [
* (Required){
* code: String (Required)
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* }
* ]
* }
* ]
* }
* }
* dictionary (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* translations (Required): [
* (Required){
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* code: String (Required)
* }
* ]
* }
* }
* }
* }</pre>
*
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the set of languages currently supported by other operations of the Translator along with
* {@link Response} on successful completion of {@link Mono}.
*/
    // NOTE(review): generated protocol method - thin delegate to the implementation client.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> getSupportedLanguagesWithResponse(RequestOptions requestOptions) {
        return this.serviceClient.getSupportedLanguagesWithResponseAsync(requestOptions);
    }
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param scope A comma-separated list of names defining the group of languages to return.
* Allowed group names are: `translation`, `transliteration` and `dictionary`.
* If no scope is given, then all groups are returned, which is equivalent to passing
* `scope=translation,transliteration,dictionary`. To decide which set of supported languages
 * is appropriate for your scenario, see the description of the response object.
* @param acceptLanguage The language to use for user interface strings. Some of the fields in the response are
* names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.
* @param ifNoneMatch Passing the value of the ETag response header in an If-None-Match field will allow the service
* to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response body.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator on successful completion
* of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<GetSupportedLanguagesResult> getSupportedLanguages(String clientTraceId, String scope,
String acceptLanguage, String ifNoneMatch) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
if (scope != null) {
requestOptions.addQueryParam("scope", scope, false);
}
if (acceptLanguage != null) {
requestOptions.setHeader(HttpHeaderName.ACCEPT_LANGUAGE, acceptLanguage);
}
if (ifNoneMatch != null) {
requestOptions.setHeader(HttpHeaderName.IF_NONE_MATCH, ifNoneMatch);
}
return getSupportedLanguagesWithResponse(requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(GetSupportedLanguagesResult.class));
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param scopes List of names defining the group of languages to return.
* @param acceptLanguage The language to use for user interface strings. Some of the fields in the response are
* names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.
* @param ifNoneMatch Passing the value of the ETag response header in an If-None-Match field will allow the service
* to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response body.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<GetSupportedLanguagesResult> getSupportedLanguages(String clientTraceId, List<LanguageScope> scopes,
String acceptLanguage, String ifNoneMatch) {
return getSupportedLanguages(clientTraceId, convertToScopesString(scopes), acceptLanguage, ifNoneMatch);
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator on successful completion
* of {@link Mono}.
*/
    // NOTE(review): generated (@Generated) convenience method - prefer regenerating from the spec
    // over hand edits.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<GetSupportedLanguagesResult> getSupportedLanguages() {
        RequestOptions requestOptions = new RequestOptions();
        return getSupportedLanguagesWithResponse(requestOptions).flatMap(FluxUtil::toMono)
            .map(protocolMethodData -> protocolMethodData.toObject(GetSupportedLanguagesResult.class));
    }
private ArrayList<InputTextItem> convertTextToData(List<String> texts) {
ArrayList<InputTextItem> content = new ArrayList<>();
for (String text : texts) {
content.add(new InputTextItem(text));
}
return content;
}
private String convertToScopesString(List<LanguageScope> scopes) {
if (scopes == null) {
return null;
}
String result = "";
for (LanguageScope scope : scopes) {
if (!result.isEmpty()) {
result += ",";
}
result += scope.toString();
}
return result;
}
} | class TextTranslationAsyncClient {
    // The generated implementation client; the protocol methods in this class delegate to it.
    @Generated
    private final TextTranslationClientImpl serviceClient;
/**
* Initializes an instance of TextTranslationAsyncClient class.
*
* @param serviceClient the service client implementation.
*/
    @Generated
    TextTranslationAsyncClient(TextTranslationClientImpl serviceClient) {
        // Package-private: instances are created by the corresponding client builder.
        this.serviceClient = serviceClient;
    }
/**
* Translate Text.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>from</td><td>String</td><td>No</td><td>Specifies the language of the input text. Find which languages are
* available to translate from by
* looking up supported languages using the translation scope. If the from parameter isn't specified,
* automatic language detection is applied to determine the source language.
*
* You must use the from parameter rather than autodetection when using the dynamic dictionary feature.
* Note: the dynamic dictionary feature is case-sensitive.</td></tr>
* <tr><td>textType</td><td>String</td><td>No</td><td>Defines whether the text being translated is plain text or
* HTML text. Any HTML needs to be a well-formed,
* complete element. Possible values are: plain (default) or html. Allowed values: "Plain", "Html".</td></tr>
* <tr><td>category</td><td>String</td><td>No</td><td>A string specifying the category (domain) of the translation.
* This parameter is used to get translations
* from a customized system built with Custom Translator. Add the Category ID from your Custom Translator
* project details to this parameter to use your deployed customized system. Default value is: general.</td></tr>
* <tr><td>profanityAction</td><td>String</td><td>No</td><td>Specifies how profanities should be treated in
* translations.
* Possible values are: NoAction (default), Marked or Deleted. Allowed values: "NoAction", "Marked",
* "Deleted".</td></tr>
* <tr><td>profanityMarker</td><td>String</td><td>No</td><td>Specifies how profanities should be marked in
* translations.
* Possible values are: Asterisk (default) or Tag. . Allowed values: "Asterisk", "Tag".</td></tr>
* <tr><td>includeAlignment</td><td>Boolean</td><td>No</td><td>Specifies whether to include alignment projection
* from source text to translated text.
* Possible values are: true or false (default).</td></tr>
* <tr><td>includeSentenceLength</td><td>Boolean</td><td>No</td><td>Specifies whether to include sentence boundaries
* for the input text and the translated text.
* Possible values are: true or false (default).</td></tr>
* <tr><td>suggestedFrom</td><td>String</td><td>No</td><td>Specifies a fallback language if the language of the
* input text can't be identified.
* Language autodetection is applied when the from parameter is omitted. If detection fails,
* the suggestedFrom language will be assumed.</td></tr>
* <tr><td>fromScript</td><td>String</td><td>No</td><td>Specifies the script of the input text.</td></tr>
* <tr><td>toScript</td><td>String</td><td>No</td><td>Specifies the script of the translated text.</td></tr>
* <tr><td>allowFallback</td><td>Boolean</td><td>No</td><td>Specifies that the service is allowed to fall back to a
* general system when a custom system doesn't exist.
* Possible values are: true (default) or false.
*
* allowFallback=false specifies that the translation should only use systems trained for the category specified
* by the request. If a translation for language X to language Y requires chaining through a pivot language E,
* then all the systems in the chain (X → E and E → Y) will need to be custom and have the same category.
* If no system is found with the specific category, the request will return a 400 status code. allowFallback=true
* specifies that the service is allowed to fall back to a general system when a custom system doesn't
* exist.</td></tr>
* </table>
 * You can add these to a request with {@link RequestOptions}.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* detectedLanguage (Optional): {
* language: String (Required)
* score: double (Required)
* }
* translations (Required): [
* (Required){
* to: String (Required)
* text: String (Required)
* transliteration (Optional): {
* text: String (Required)
* script: String (Required)
* }
* alignment (Optional): {
* proj: String (Required)
* }
* sentLen (Optional): {
* srcSentLen (Required): [
* int (Required)
* ]
* transSentLen (Required): [
* int (Required)
* ]
* }
* }
* ]
* sourceText (Optional): {
* text: String (Required)
* }
* }
* ]
* }</pre>
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    // NOTE(review): generated protocol method - thin delegate to the implementation client.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> translateWithResponse(List<String> targetLanguages, BinaryData body,
        RequestOptions requestOptions) {
        return this.serviceClient.translateWithResponseAsync(targetLanguages, body, requestOptions);
    }
/**
* Transliterate Text.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* script: String (Required)
* }
* ]
* }</pre>
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    // NOTE(review): generated protocol method - thin delegate to the implementation client.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> transliterateWithResponse(String language, String sourceLanguageScript,
        String targetLanguageScript, BinaryData body, RequestOptions requestOptions) {
        return this.serviceClient.transliterateWithResponseAsync(language, sourceLanguageScript, targetLanguageScript,
            body, requestOptions);
    }
/**
* Find Sentence Boundaries.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>language</td><td>String</td><td>No</td><td>Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.</td></tr>
* <tr><td>script</td><td>String</td><td>No</td><td>Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* detectedLanguage (Optional): {
* language: String (Required)
* score: double (Required)
* }
* sentLen (Required): [
* int (Required)
* ]
* }
* ]
* }</pre>
*
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    // NOTE(review): generated protocol method - thin delegate to the implementation client.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> findSentenceBoundariesWithResponse(BinaryData body,
        RequestOptions requestOptions) {
        return this.serviceClient.findSentenceBoundariesWithResponseAsync(body, requestOptions);
    }
/**
* Lookup Dictionary Entries.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* normalizedSource: String (Required)
* displaySource: String (Required)
* translations (Required): [
* (Required){
* normalizedTarget: String (Required)
* displayTarget: String (Required)
* posTag: String (Required)
* confidence: double (Required)
* prefixWord: String (Required)
* backTranslations (Required): [
* (Required){
* normalizedText: String (Required)
* displayText: String (Required)
* numExamples: int (Required)
* frequencyCount: int (Required)
* }
* ]
* }
* ]
* }
* ]
* }</pre>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    // NOTE(review): generated protocol method - thin delegate to the implementation client.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> lookupDictionaryEntriesWithResponse(String sourceLanguage, String targetLanguage,
        BinaryData body, RequestOptions requestOptions) {
        return this.serviceClient.lookupDictionaryEntriesWithResponseAsync(sourceLanguage, targetLanguage, body,
            requestOptions);
    }
/**
* Lookup Dictionary Examples.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* translation: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* normalizedSource: String (Required)
* normalizedTarget: String (Required)
* examples (Required): [
* (Required){
* sourcePrefix: String (Required)
* sourceTerm: String (Required)
* sourceSuffix: String (Required)
* targetPrefix: String (Required)
* targetTerm: String (Required)
* targetSuffix: String (Required)
* }
* ]
* }
* ]
* }</pre>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    // NOTE(review): generated protocol method - thin delegate to the implementation client.
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> lookupDictionaryExamplesWithResponse(String sourceLanguage, String targetLanguage,
        BinaryData body, RequestOptions requestOptions) {
        return this.serviceClient.lookupDictionaryExamplesWithResponseAsync(sourceLanguage, targetLanguage, body,
            requestOptions);
    }
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
 * For example, use to=de&amp;to=it to translate to German and Italian.
* @param body Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param sourceLanguage Specifies the language of the input text. Find which languages are available to translate
* from by
* looking up supported languages using the translation scope. If the from parameter isn't specified,
* automatic language detection is applied to determine the source language.
*
* You must use the from parameter rather than autodetection when using the dynamic dictionary feature.
* Note: the dynamic dictionary feature is case-sensitive.
* @param textType Defines whether the text being translated is plain text or HTML text. Any HTML needs to be a
* well-formed,
* complete element. Possible values are: plain (default) or html.
* @param category A string specifying the category (domain) of the translation. This parameter is used to get
* translations
* from a customized system built with Custom Translator. Add the Category ID from your Custom Translator
* project details to this parameter to use your deployed customized system. Default value is: general.
* @param profanityAction Specifies how profanities should be treated in translations.
* Possible values are: NoAction (default), Marked or Deleted.
* @param profanityMarker Specifies how profanities should be marked in translations.
* Possible values are: Asterisk (default) or Tag.
* @param includeAlignment Specifies whether to include alignment projection from source text to translated text.
* Possible values are: true or false (default).
* @param includeSentenceLength Specifies whether to include sentence boundaries for the input text and the
* translated text.
* Possible values are: true or false (default).
* @param suggestedSourceLanguage Specifies a fallback language if the language of the input text can't be
* identified.
* Language autodetection is applied when the from parameter is omitted. If detection fails,
* the suggestedFrom language will be assumed.
* @param sourceLanguageScript Specifies the script of the input text.
* @param targetLanguageScript Specifies the script of the translated text.
* @param allowFallback Specifies that the service is allowed to fall back to a general system when a custom system
* doesn't exist.
* Possible values are: true (default) or false.
*
* allowFallback=false specifies that the translation should only use systems trained for the category specified
* by the request. If a translation for language X to language Y requires chaining through a pivot language E,
* then all the systems in the chain (X → E and E → Y) will need to be custom and have the same category.
* If no system is found with the specific category, the request will return a 400 status code. allowFallback=true
* specifies that the service is allowed to fall back to a general system when a custom system doesn't exist.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<TranslatedTextItem>> translate(List<String> targetLanguages, List<InputTextItem> body,
String clientTraceId, String sourceLanguage, TextType textType, String category,
ProfanityAction profanityAction, ProfanityMarker profanityMarker, Boolean includeAlignment,
Boolean includeSentenceLength, String suggestedSourceLanguage, String sourceLanguageScript,
String targetLanguageScript, Boolean allowFallback) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
if (sourceLanguage != null) {
requestOptions.addQueryParam("from", sourceLanguage, false);
}
if (textType != null) {
requestOptions.addQueryParam("textType", textType.toString(), false);
}
if (category != null) {
requestOptions.addQueryParam("category", category, false);
}
if (profanityAction != null) {
requestOptions.addQueryParam("profanityAction", profanityAction.toString(), false);
}
if (profanityMarker != null) {
requestOptions.addQueryParam("profanityMarker", profanityMarker.toString(), false);
}
if (includeAlignment != null) {
requestOptions.addQueryParam("includeAlignment", String.valueOf(includeAlignment), false);
}
if (includeSentenceLength != null) {
requestOptions.addQueryParam("includeSentenceLength", String.valueOf(includeSentenceLength), false);
}
if (suggestedSourceLanguage != null) {
requestOptions.addQueryParam("suggestedFrom", suggestedSourceLanguage, false);
}
if (sourceLanguageScript != null) {
requestOptions.addQueryParam("fromScript", sourceLanguageScript, false);
}
if (targetLanguageScript != null) {
requestOptions.addQueryParam("toScript", targetLanguageScript, false);
}
if (allowFallback != null) {
requestOptions.addQueryParam("allowFallback", String.valueOf(allowFallback), false);
}
return translateWithResponse(targetLanguages, BinaryData.fromObject(body), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM));
}
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
 * For example, use to=de&amp;to=it to translate to German and Italian.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<TranslatedTextItem>> translateInner(List<String> targetLanguages, List<InputTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return translateWithResponse(targetLanguages, BinaryData.fromObject(body), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM));
}
/**
* Translate Text.
* <p>
* This method is used when you have single target language and multiple texts to translate.
* </p>
*
* @param targetLanguage Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
 * For example, use to=de&amp;to=it to translate to German and Italian.
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(String targetLanguage, List<String> texts) {
return translateInner(Arrays.asList(targetLanguage), convertTextToData(texts));
}
/**
* Translate Text.
* <p>
* This method is used when you have single target language and single text to translate.
* </p>
*
* @param targetLanguage Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TranslatedTextItem> translate(String targetLanguage, String text) {
return translate(targetLanguage, Arrays.asList(text))
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Translate Text.
* <p>
* This method is used when you have one input text and the optional parameters are needed such as specification
* of a source language, profanity handling etc.
* </p>
*
* @param text Text to translate.
* @param translateOptions Translate Options.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TranslatedTextItem> translate(String text, TranslateOptions translateOptions) {
return translate(Arrays.asList(text), translateOptions)
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Translate Text.
* <p>
* This method is used when you have multiple texts and the optional parameters are needed such as specification
* of a source language, profanity handling etc..
* </p>
*
* @param texts List of text to translate.
* @param translateOptions Translate Options.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(List<String> texts, TranslateOptions translateOptions) {
List<InputTextItem> content = new ArrayList<>();
for (String text : texts) {
content.add(.
*/
private Mono<List<TransliteratedText>> transliterateInner(String language, String sourceLanguageScript,
String targetLanguageScript, List<InputTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return transliterateWithResponse(language, sourceLanguageScript, targetLanguageScript,
BinaryData.fromObject(body), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT));
}
/**
* Transliterate Text.
* <p>
* This method is used when you have multiple texts to transliterate and you want to provide client trace id.
* </p>
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, List<String> body) {
return transliterateInner(language, sourceLanguageScript, targetLanguageScript, convertTextToData(body));
}
/**
* Transliterate Text.
* <p>
* This method is used when you have single text to transliterate and you want to provide client trace id.
* </p>
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TransliteratedText> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, String text) {
return transliterate(language, sourceLanguageScript, targetLanguageScript, Arrays.asList(text))
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Find Sentence Boundaries.
*
* @param body Defines the content of the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<BreakSentenceItem>> findSentenceBoundariesInner(List<InputTextItem> body, String language,
String script) {
RequestOptions requestOptions = new RequestOptions();
if (language != null) {
requestOptions.addQueryParam("language", language, false);
}
if (script != null) {
requestOptions.addQueryParam("script", script, false);
}
return findSentenceBoundariesWithResponse(BinaryData.fromObject(body), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM));
}
/**
* Find Sentence Boundaries.
* <p>
* This method is used when you have multiple texts for which you want to find sentence boundaries and you want to
* provide
* client trace id.
* </p>
*
* @param texts Defines the content of the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(List<String> texts, String language, String script) {
return findSentenceBoundariesInner(convertTextToData(texts), language, script);
}
/**
* Find Sentence Boundaries.
* <p>
* This method is used when you have single text for which you want to find sentence boundaries and you want to
* provide
* client trace id.
* </p>
*
* @param text Defines the content of the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BreakSentenceItem> findSentenceBoundaries(String text, String language, String script) {
return findSentenceBoundaries(Arrays.asList(text), language, script)
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Find Sentence Boundaries.
*
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<BreakSentenceItem>> findSentenceBoundariesInner(List<InputTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return findSentenceBoundariesWithResponse(BinaryData.fromObject(body), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM));
}
/**
* Find Sentence Boundaries.
* <p>
* This method is used when you have multiple texts for which you want to find sentence boundaries and you want
* the source language to be auto-detected by the service.
* </p>
*
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(List<String> texts) {
return findSentenceBoundariesInner(convertTextToData(texts));
}
/**
* Find Sentence Boundaries.
* <p>
* This method is used when you have single text for which you want to find sentence boundaries and you want
* the source language to be auto-detected by the service.
* </p>
*
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BreakSentenceItem> findSentenceBoundaries(String text) {
return findSentenceBoundaries(Arrays.asList(text))
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<DictionaryLookupItem>> lookupDictionaryEntriesInner(String sourceLanguage, String targetLanguage,
List<InputTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return lookupDictionaryEntriesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(body),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM));
}
/**
* Lookup Dictionary Entries.
* <p>
* This method is used when you want lookup multiple entries in the dictionary and you want to provide
* client trace id.
* </p>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
List<String> texts) {
return lookupDictionaryEntriesInner(sourceLanguage, targetLanguage, convertTextToData(texts));
}
/**
* Lookup Dictionary Entries.
* <p>
* This method is used when you want lookup single entry in the dictionary and you want to provide
* client trace id.
* </p>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DictionaryLookupItem> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
String text) {
return lookupDictionaryEntries(sourceLanguage, targetLanguage, Arrays.asList(text))
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Lookup Dictionary Examples.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
public Mono<List<DictionaryExampleItem>> lookupDictionaryExamples(String sourceLanguage, String targetLanguage,
List<DictionaryExampleTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return lookupDictionaryExamplesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(body),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM));
}
    // Cached TypeReference instances used to deserialize BinaryData response payloads into
    // strongly-typed lists; declared once to avoid re-creating the anonymous subclasses per call.
    @Generated
    private static final TypeReference<List<TransliteratedText>> TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT
        = new TypeReference<List<TransliteratedText>>() {
        };
    @Generated
    private static final TypeReference<List<BreakSentenceItem>> TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM
        = new TypeReference<List<BreakSentenceItem>>() {
        };
    @Generated
    private static final TypeReference<List<DictionaryLookupItem>> TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM
        = new TypeReference<List<DictionaryLookupItem>>() {
        };
    @Generated
    private static final TypeReference<List<DictionaryExampleItem>> TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM
        = new TypeReference<List<DictionaryExampleItem>>() {
        };
    @Generated
    private static final TypeReference<List<TranslatedTextItem>> TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM
        = new TypeReference<List<TranslatedTextItem>>() {
        };
/**
* Gets the set of languages currently supported by other operations of the Translator.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>scope</td><td>String</td><td>No</td><td>A comma-separated list of names defining the group of languages
* to return.
* Allowed group names are: `translation`, `transliteration` and `dictionary`.
* If no scope is given, then all groups are returned, which is equivalent to passing
* `scope=translation,transliteration,dictionary`. To decide which set of supported languages
* is appropriate for your scenario, see the description of the [response object](
* </table>
 * You can add these to a request with {@link RequestOptions#addQueryParam}.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* <tr><td>Accept-Language</td><td>String</td><td>No</td><td>The language to use for user interface strings. Some of
* the fields in the response are names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.</td></tr>
* <tr><td>If-None-Match</td><td>String</td><td>No</td><td>Passing the value of the ETag response header in an
* If-None-Match field will allow the service to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response
* body.</td></tr>
* </table>
 * You can add these to a request with {@link RequestOptions#addHeader}.
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* {
* translation (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* }
* }
* transliteration (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* scripts (Required): [
* (Required){
* code: String (Required)
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* toScripts (Required): [
* (Required){
* code: String (Required)
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* }
* ]
* }
* ]
* }
* }
* dictionary (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* translations (Required): [
* (Required){
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* code: String (Required)
* }
* ]
* }
* }
* }
* }</pre>
*
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the set of languages currently supported by other operations of the Translator along with
* {@link Response} on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BinaryData>> getSupportedLanguagesWithResponse(RequestOptions requestOptions) {
    // Thin protocol-layer pass-through to the generated implementation client.
    return serviceClient.getSupportedLanguagesWithResponseAsync(requestOptions);
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @param scope A comma-separated list of names defining the group of languages to return.
* Allowed group names are: `translation`, `transliteration` and `dictionary`.
* If no scope is given, then all groups are returned, which is equivalent to passing
* `scope=translation,transliteration,dictionary`. To decide which set of supported languages
* is appropriate for your scenario, see the description of the [response object](
* @param acceptLanguage The language to use for user interface strings. Some of the fields in the response are
* names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.
* @param ifNoneMatch Passing the value of the ETag response header in an If-None-Match field will allow the service
* to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response body.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator on successful completion
* of {@link Mono}.
*/
private Mono<GetSupportedLanguagesResult> getSupportedLanguages(String scope, String acceptLanguage,
    String ifNoneMatch) {
    // Map each optional convenience parameter onto the protocol-layer request options:
    // 'scope' becomes a query parameter, the other two become request headers.
    RequestOptions options = new RequestOptions();
    if (scope != null) {
        options.addQueryParam("scope", scope, false);
    }
    if (acceptLanguage != null) {
        options.setHeader(HttpHeaderName.ACCEPT_LANGUAGE, acceptLanguage);
    }
    if (ifNoneMatch != null) {
        options.setHeader(HttpHeaderName.IF_NONE_MATCH, ifNoneMatch);
    }
    return getSupportedLanguagesWithResponse(options)
        .flatMap(FluxUtil::toMono)
        .map(data -> data.toObject(GetSupportedLanguagesResult.class));
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @param scopes List of names defining the group of languages to return.
* @param acceptLanguage The language to use for user interface strings. Some of the fields in the response are
* names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.
* @param ifNoneMatch Passing the value of the ETag response header in an If-None-Match field will allow the service
* to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response body.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<GetSupportedLanguagesResult> getSupportedLanguages(List<LanguageScope> scopes, String acceptLanguage,
    String ifNoneMatch) {
    // Collapse the scope list into the comma-separated form the service expects, then delegate.
    String scope = convertToScopesString(scopes);
    return getSupportedLanguages(scope, acceptLanguage, ifNoneMatch);
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator on successful completion
* of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<GetSupportedLanguagesResult> getSupportedLanguages() {
    // No-argument overload: call the protocol method with default options and
    // deserialize the binary payload into the typed result model.
    return getSupportedLanguagesWithResponse(new RequestOptions())
        .flatMap(FluxUtil::toMono)
        .map(data -> data.toObject(GetSupportedLanguagesResult.class));
}
/**
 * Wraps each raw input string in an {@link InputTextItem} so the list can be serialized
 * as a service request body.
 *
 * @param texts the raw input strings; assumed non-null (callers pass user-supplied lists) — a
 * null list would throw {@link NullPointerException} here, as in the original implementation.
 * @return a new mutable list containing one {@link InputTextItem} per input string, in input order.
 */
private List<InputTextItem> convertTextToData(List<String> texts) {
    // Presize to the known element count to avoid intermediate array growth.
    List<InputTextItem> content = new ArrayList<>(texts.size());
    for (String text : texts) {
        content.add(new InputTextItem(text));
    }
    return content;
}
/**
 * Joins language scopes into the comma-separated string form expected by the service's
 * {@code scope} query parameter.
 *
 * @param scopes the scopes to join; may be null.
 * @return {@code null} when {@code scopes} is null; otherwise the scopes' string forms joined
 * with commas (an empty string for an empty list).
 */
private String convertToScopesString(List<LanguageScope> scopes) {
    if (scopes == null) {
        return null;
    }
    // StringBuilder avoids the O(n^2) cost of repeated String concatenation in a loop.
    StringBuilder result = new StringBuilder();
    for (LanguageScope scope : scopes) {
        if (result.length() > 0) {
            result.append(',');
        }
        // Explicit toString() preserves the original NPE behavior for a null element.
        result.append(scope.toString());
    }
    return result.toString();
}
} |
Changed. | public Mono<List<TranslatedTextItem>> translate(List<String> texts, TranslateOptions translateOptions) {
ArrayList<InputTextItem> content = new ArrayList<>();
for (String text : texts) {
content.add(new InputTextItem(text));
}
return translate(translateOptions.getTargetLanguages(), content, translateOptions.getClientTraceId(),
translateOptions.getSourceLanguage(), translateOptions.getTextType(), translateOptions.getCategory(),
translateOptions.getProfanityAction(), translateOptions.getProfanityMarker(),
translateOptions.isIncludeAlignment(), translateOptions.isIncludeSentenceLength(),
translateOptions.getSuggestedSourceLanguage(), translateOptions.getSourceLanguageScript(),
translateOptions.getTargetLanguageScript(), translateOptions.isAllowFallback());
} | ArrayList<InputTextItem> content = new ArrayList<>(); | new InputTextItem(text));
}
return translate(translateOptions.getTargetLanguages(), content, translateOptions.getClientTraceId(),
translateOptions.getSourceLanguage(), translateOptions.getTextType(), translateOptions.getCategory(),
translateOptions.getProfanityAction(), translateOptions.getProfanityMarker(),
translateOptions.isIncludeAlignment(), translateOptions.isIncludeSentenceLength(),
translateOptions.getSuggestedSourceLanguage(), translateOptions.getSourceLanguageScript(),
translateOptions.getTargetLanguageScript(), translateOptions.isAllowFallback());
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono} | class TextTranslationAsyncClient {
@Generated
// Generated implementation client that performs the actual HTTP calls; all public
// methods on this class delegate to it.
private final TextTranslationClientImpl serviceClient;
/**
 * Initializes an instance of TextTranslationAsyncClient class.
 *
 * @param serviceClient the service client implementation.
 */
@Generated
TextTranslationAsyncClient(TextTranslationClientImpl serviceClient) {
    // Package-private: instances are expected to be created via the client builder.
    this.serviceClient = serviceClient;
}
/**
* Translate Text.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>from</td><td>String</td><td>No</td><td>Specifies the language of the input text. Find which languages are
* available to translate from by
* looking up supported languages using the translation scope. If the from parameter isn't specified,
* automatic language detection is applied to determine the source language.
*
* You must use the from parameter rather than autodetection when using the dynamic dictionary feature.
* Note: the dynamic dictionary feature is case-sensitive.</td></tr>
* <tr><td>textType</td><td>String</td><td>No</td><td>Defines whether the text being translated is plain text or
* HTML text. Any HTML needs to be a well-formed,
* complete element. Possible values are: plain (default) or html. Allowed values: "Plain", "Html".</td></tr>
* <tr><td>category</td><td>String</td><td>No</td><td>A string specifying the category (domain) of the translation.
* This parameter is used to get translations
* from a customized system built with Custom Translator. Add the Category ID from your Custom Translator
* project details to this parameter to use your deployed customized system. Default value is: general.</td></tr>
* <tr><td>profanityAction</td><td>String</td><td>No</td><td>Specifies how profanities should be treated in
* translations.
* Possible values are: NoAction (default), Marked or Deleted. Allowed values: "NoAction", "Marked",
* "Deleted".</td></tr>
* <tr><td>profanityMarker</td><td>String</td><td>No</td><td>Specifies how profanities should be marked in
* translations.
* Possible values are: Asterisk (default) or Tag. . Allowed values: "Asterisk", "Tag".</td></tr>
* <tr><td>includeAlignment</td><td>Boolean</td><td>No</td><td>Specifies whether to include alignment projection
* from source text to translated text.
* Possible values are: true or false (default).</td></tr>
* <tr><td>includeSentenceLength</td><td>Boolean</td><td>No</td><td>Specifies whether to include sentence boundaries
* for the input text and the translated text.
* Possible values are: true or false (default).</td></tr>
* <tr><td>suggestedFrom</td><td>String</td><td>No</td><td>Specifies a fallback language if the language of the
* input text can't be identified.
* Language autodetection is applied when the from parameter is omitted. If detection fails,
* the suggestedFrom language will be assumed.</td></tr>
* <tr><td>fromScript</td><td>String</td><td>No</td><td>Specifies the script of the input text.</td></tr>
* <tr><td>toScript</td><td>String</td><td>No</td><td>Specifies the script of the translated text.</td></tr>
* <tr><td>allowFallback</td><td>Boolean</td><td>No</td><td>Specifies that the service is allowed to fall back to a
* general system when a custom system doesn't exist.
* Possible values are: true (default) or false.
*
* allowFallback=false specifies that the translation should only use systems trained for the category specified
* by the request. If a translation for language X to language Y requires chaining through a pivot language E,
* then all the systems in the chain (X → E and E → Y) will need to be custom and have the same category.
* If no system is found with the specific category, the request will return a 400 status code. allowFallback=true
* specifies that the service is allowed to fall back to a general system when a custom system doesn't
* exist.</td></tr>
* </table>
 * You can add these to a request with {@link RequestOptions#addQueryParam}.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
 * You can add these to a request with {@link RequestOptions#addHeader}.
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* detectedLanguage (Optional): {
* language: String (Required)
* score: double (Required)
* }
* translations (Required): [
* (Required){
* to: String (Required)
* text: String (Required)
* transliteration (Optional): {
* text: String (Required)
* script: String (Required)
* }
* alignment (Optional): {
* proj: String (Required)
* }
* sentLen (Optional): {
* srcSentLen (Required): [
* int (Required)
* ]
* transSentLen (Required): [
* int (Required)
* ]
* }
* }
* ]
* sourceText (Optional): {
* text: String (Required)
* }
* }
* ]
* }</pre>
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BinaryData>> translateWithResponse(List<String> targetLanguages, BinaryData requestBody,
    RequestOptions requestOptions) {
    // Protocol-layer pass-through to the generated implementation client.
    return serviceClient.translateWithResponseAsync(targetLanguages, requestBody, requestOptions);
}
/**
* Transliterate Text.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* script: String (Required)
* }
* ]
* }</pre>
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BinaryData>> transliterateWithResponse(String language, String sourceLanguageScript,
    String targetLanguageScript, BinaryData requestBody, RequestOptions requestOptions) {
    // Protocol-layer pass-through to the generated implementation client.
    return serviceClient.transliterateWithResponseAsync(language, sourceLanguageScript,
        targetLanguageScript, requestBody, requestOptions);
}
/**
* Find Sentence Boundaries.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>language</td><td>String</td><td>No</td><td>Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.</td></tr>
* <tr><td>script</td><td>String</td><td>No</td><td>Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* detectedLanguage (Optional): {
* language: String (Required)
* score: double (Required)
* }
* sentLen (Required): [
* int (Required)
* ]
* }
* ]
* }</pre>
*
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BinaryData>> findSentenceBoundariesWithResponse(BinaryData requestBody,
    RequestOptions requestOptions) {
    // Protocol-layer pass-through to the generated implementation client.
    return serviceClient.findSentenceBoundariesWithResponseAsync(requestBody, requestOptions);
}
/**
* Lookup Dictionary Entries.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* normalizedSource: String (Required)
* displaySource: String (Required)
* translations (Required): [
* (Required){
* normalizedTarget: String (Required)
* displayTarget: String (Required)
* posTag: String (Required)
* confidence: double (Required)
* prefixWord: String (Required)
* backTranslations (Required): [
* (Required){
* normalizedText: String (Required)
* displayText: String (Required)
* numExamples: int (Required)
* frequencyCount: int (Required)
* }
* ]
* }
* ]
* }
* ]
* }</pre>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BinaryData>> lookupDictionaryEntriesWithResponse(String sourceLanguage, String targetLanguage,
    BinaryData requestBody, RequestOptions requestOptions) {
    // Protocol-layer pass-through to the generated implementation client.
    return serviceClient.lookupDictionaryEntriesWithResponseAsync(sourceLanguage, targetLanguage,
        requestBody, requestOptions);
}
/**
* Lookup Dictionary Examples.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* translation: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* normalizedSource: String (Required)
* normalizedTarget: String (Required)
* examples (Required): [
* (Required){
* sourcePrefix: String (Required)
* sourceTerm: String (Required)
* sourceSuffix: String (Required)
* targetPrefix: String (Required)
* targetTerm: String (Required)
* targetSuffix: String (Required)
* }
* ]
* }
* ]
* }</pre>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BinaryData>> lookupDictionaryExamplesWithResponse(String sourceLanguage, String targetLanguage,
    BinaryData requestBody, RequestOptions requestOptions) {
    // Protocol-layer pass-through to the generated implementation client.
    return serviceClient.lookupDictionaryExamplesWithResponseAsync(sourceLanguage, targetLanguage,
        requestBody, requestOptions);
}
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param sourceLanguage Specifies the language of the input text. Find which languages are available to translate
* from by
* looking up supported languages using the translation scope. If the from parameter isn't specified,
* automatic language detection is applied to determine the source language.
*
* You must use the from parameter rather than autodetection when using the dynamic dictionary feature.
* Note: the dynamic dictionary feature is case-sensitive.
* @param textType Defines whether the text being translated is plain text or HTML text. Any HTML needs to be a
* well-formed,
* complete element. Possible values are: plain (default) or html.
* @param category A string specifying the category (domain) of the translation. This parameter is used to get
* translations
* from a customized system built with Custom Translator. Add the Category ID from your Custom Translator
* project details to this parameter to use your deployed customized system. Default value is: general.
* @param profanityAction Specifies how profanities should be treated in translations.
* Possible values are: NoAction (default), Marked or Deleted.
* @param profanityMarker Specifies how profanities should be marked in translations.
* Possible values are: Asterisk (default) or Tag.
* @param includeAlignment Specifies whether to include alignment projection from source text to translated text.
* Possible values are: true or false (default).
* @param includeSentenceLength Specifies whether to include sentence boundaries for the input text and the
* translated text.
* Possible values are: true or false (default).
* @param suggestedSourceLanguage Specifies a fallback language if the language of the input text can't be
* identified.
* Language autodetection is applied when the from parameter is omitted. If detection fails,
* the suggestedFrom language will be assumed.
* @param sourceLanguageScript Specifies the script of the input text.
* @param targetLanguageScript Specifies the script of the translated text.
* @param allowFallback Specifies that the service is allowed to fall back to a general system when a custom system
* doesn't exist.
* Possible values are: true (default) or false.
*
* allowFallback=false specifies that the translation should only use systems trained for the category specified
* by the request. If a translation for language X to language Y requires chaining through a pivot language E,
* then all the systems in the chain (X → E and E → Y) will need to be custom and have the same category.
* If no system is found with the specific category, the request will return a 400 status code. allowFallback=true
* specifies that the service is allowed to fall back to a general system when a custom system doesn't exist.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<TranslatedTextItem>> translate(List<String> targetLanguages, List<InputTextItem> requestBody,
String clientTraceId, String sourceLanguage, TextType textType, String category,
ProfanityAction profanityAction, ProfanityMarker profanityMarker, Boolean includeAlignment,
Boolean includeSentenceLength, String suggestedSourceLanguage, String sourceLanguageScript,
String targetLanguageScript, Boolean allowFallback) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
if (sourceLanguage != null) {
requestOptions.addQueryParam("from", sourceLanguage, false);
}
if (textType != null) {
requestOptions.addQueryParam("textType", textType.toString(), false);
}
if (category != null) {
requestOptions.addQueryParam("category", category, false);
}
if (profanityAction != null) {
requestOptions.addQueryParam("profanityAction", profanityAction.toString(), false);
}
if (profanityMarker != null) {
requestOptions.addQueryParam("profanityMarker", profanityMarker.toString(), false);
}
if (includeAlignment != null) {
requestOptions.addQueryParam("includeAlignment", String.valueOf(includeAlignment), false);
}
if (includeSentenceLength != null) {
requestOptions.addQueryParam("includeSentenceLength", String.valueOf(includeSentenceLength), false);
}
if (suggestedSourceLanguage != null) {
requestOptions.addQueryParam("suggestedFrom", suggestedSourceLanguage, false);
}
if (sourceLanguageScript != null) {
requestOptions.addQueryParam("fromScript", sourceLanguageScript, false);
}
if (targetLanguageScript != null) {
requestOptions.addQueryParam("toScript", targetLanguageScript, false);
}
if (allowFallback != null) {
requestOptions.addQueryParam("allowFallback", String.valueOf(allowFallback), false);
}
return translateWithResponse(targetLanguages, BinaryData.fromObject(requestBody), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM));
}
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<TranslatedTextItem>> translateInner(List<String> targetLanguages,
List<InputTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return translateWithResponse(targetLanguages, BinaryData.fromObject(requestBody), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM));
}
/**
* Translate Text.
*
* @param targetLanguage Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(String targetLanguage, List<String> texts) {
return translateInner(Arrays.asList(targetLanguage), convertTextToData(texts));
}
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(String targetLanguages, String text) {
return translate(targetLanguages, Arrays.asList(text));
}
/**
* Translate Text.
*
* @param text Text to translate.
* @param translateOptions Translate Options.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(String text, TranslateOptions translateOptions) {
return translate(Arrays.asList(text), translateOptions);
}
/**
* Translate Text.
*
* @param texts List of text to translate.
* @param translateOptions Translate Options.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(List<String> texts, TranslateOptions translateOptions) {
ArrayList<InputTextItem> content = new ArrayList<>();
for (String text : texts) {
content.add(.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<TransliteratedText>> transliterateInner(String language, String sourceLanguageScript,
String targetLanguageScript, List<InputTextItem> requestBody, String clientTraceId) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
return transliterateWithResponse(language, sourceLanguageScript, targetLanguageScript,
BinaryData.fromObject(requestBody), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT));
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, List<String> requestBody, String clientTraceId) {
return transliterateInner(language, sourceLanguageScript, targetLanguageScript, convertTextToData(requestBody),
clientTraceId);
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param text Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, String text, String clientTraceId) {
return transliterate(language, sourceLanguageScript, targetLanguageScript, Arrays.asList(text), clientTraceId);
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterateInner(String language, String sourceLanguageScript,
String targetLanguageScript, List<InputTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return transliterateWithResponse(language, sourceLanguageScript, targetLanguageScript,
BinaryData.fromObject(requestBody), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT));
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, List<String> requestBody) {
return transliterateInner(language, sourceLanguageScript, targetLanguageScript, convertTextToData(requestBody));
}
/**
* Transliterate Text.
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, String text) {
return transliterate(language, sourceLanguageScript, targetLanguageScript, Arrays.asList(text));
}
/**
* Find Sentence Boundaries.
*
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<BreakSentenceItem>> findSentenceBoundariesInner(List<InputTextItem> requestBody,
String clientTraceId, String language, String script) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
if (language != null) {
requestOptions.addQueryParam("language", language, false);
}
if (script != null) {
requestOptions.addQueryParam("script", script, false);
}
return findSentenceBoundariesWithResponse(BinaryData.fromObject(requestBody), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM));
}
/**
* Find Sentence Boundaries.
*
* @param texts Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(List<String> texts, String clientTraceId,
String language, String script) {
return findSentenceBoundariesInner(convertTextToData(texts), clientTraceId, language, script);
}
/**
* Find Sentence Boundaries.
*
* @param text Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(String text, String clientTraceId, String language,
String script) {
return findSentenceBoundaries(Arrays.asList(text), clientTraceId, language, script);
}
/**
* Find Sentence Boundaries.
*
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<BreakSentenceItem>> findSentenceBoundariesInner(List<InputTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return findSentenceBoundariesWithResponse(BinaryData.fromObject(requestBody), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM));
}
/**
* Find Sentence Boundaries.
*
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(List<String> texts) {
return findSentenceBoundariesInner(convertTextToData(texts));
}
/**
* Find Sentence Boundaries.
*
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(String text) {
return findSentenceBoundaries(Arrays.asList(text));
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<DictionaryLookupItem>> lookupDictionaryEntriesInner(String sourceLanguage, String targetLanguage,
List<InputTextItem> requestBody, String clientTraceId) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
return lookupDictionaryEntriesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(requestBody),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM));
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param texts Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
List<String> texts, String clientTraceId) {
return lookupDictionaryEntriesInner(sourceLanguage, targetLanguage, convertTextToData(texts), clientTraceId);
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param text Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
String text, String clientTraceId) {
return lookupDictionaryEntries(sourceLanguage, targetLanguage, Arrays.asList(text), clientTraceId);
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<List<DictionaryLookupItem>> lookupDictionaryEntriesInner(String sourceLanguage, String targetLanguage,
List<InputTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return lookupDictionaryEntriesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(requestBody),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM));
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
List<String> texts) {
return lookupDictionaryEntriesInner(sourceLanguage, targetLanguage, convertTextToData(texts));
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
String text) {
return lookupDictionaryEntries(sourceLanguage, targetLanguage, Arrays.asList(text));
}
/**
* Lookup Dictionary Examples.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryExampleItem>> lookupDictionaryExamples(String sourceLanguage, String targetLanguage,
List<DictionaryExampleTextItem> requestBody, String clientTraceId) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
return lookupDictionaryExamplesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(requestBody),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM));
}
/**
* Lookup Dictionary Examples.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param requestBody Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<DictionaryExampleItem>> lookupDictionaryExamples(String sourceLanguage, String targetLanguage,
List<DictionaryExampleTextItem> requestBody) {
RequestOptions requestOptions = new RequestOptions();
return lookupDictionaryExamplesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(requestBody),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM));
}
    // Cached type references used to deserialize protocol-layer BinaryData responses into
    // strongly-typed lists. Created once as constants so the anonymous TypeReference
    // subclasses are not re-allocated on every call.
    @Generated
    private static final TypeReference<List<TransliteratedText>> TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT
        = new TypeReference<List<TransliteratedText>>() {
        };
    @Generated
    private static final TypeReference<List<BreakSentenceItem>> TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM
        = new TypeReference<List<BreakSentenceItem>>() {
        };
    @Generated
    private static final TypeReference<List<DictionaryLookupItem>> TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM
        = new TypeReference<List<DictionaryLookupItem>>() {
        };
    @Generated
    private static final TypeReference<List<DictionaryExampleItem>> TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM
        = new TypeReference<List<DictionaryExampleItem>>() {
        };
    @Generated
    private static final TypeReference<List<TranslatedTextItem>> TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM
        = new TypeReference<List<TranslatedTextItem>>() {
        };
/**
* Gets the set of languages currently supported by other operations of the Translator.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>scope</td><td>String</td><td>No</td><td>A comma-separated list of names defining the group of languages
* to return.
* Allowed group names are: `translation`, `transliteration` and `dictionary`.
* If no scope is given, then all groups are returned, which is equivalent to passing
* `scope=translation,transliteration,dictionary`. To decide which set of supported languages
* is appropriate for your scenario, see the description of the [response object](
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* <tr><td>Accept-Language</td><td>String</td><td>No</td><td>The language to use for user interface strings. Some of
* the fields in the response are names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.</td></tr>
* <tr><td>If-None-Match</td><td>String</td><td>No</td><td>Passing the value of the ETag response header in an
* If-None-Match field will allow the service to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response
* body.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* {
* translation (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* }
* }
* transliteration (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* scripts (Required): [
* (Required){
* code: String (Required)
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* toScripts (Required): [
* (Required){
* code: String (Required)
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* }
* ]
* }
* ]
* }
* }
* dictionary (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* translations (Required): [
* (Required){
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* code: String (Required)
* }
* ]
* }
* }
* }
* }</pre>
*
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the set of languages currently supported by other operations of the Translator along with
* {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> getSupportedLanguagesWithResponse(RequestOptions requestOptions) {
        // Pure pass-through to the generated protocol-layer implementation client.
        return this.serviceClient.getSupportedLanguagesWithResponseAsync(requestOptions);
    }
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param scope A comma-separated list of names defining the group of languages to return.
* Allowed group names are: `translation`, `transliteration` and `dictionary`.
* If no scope is given, then all groups are returned, which is equivalent to passing
* `scope=translation,transliteration,dictionary`. To decide which set of supported languages
* is appropriate for your scenario, see the description of the [response object](
* @param acceptLanguage The language to use for user interface strings. Some of the fields in the response are
* names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.
* @param ifNoneMatch Passing the value of the ETag response header in an If-None-Match field will allow the service
* to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response body.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator on successful completion
* of {@link Mono}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<GetSupportedLanguagesResult> getSupportedLanguages(String clientTraceId, String scope,
String acceptLanguage, String ifNoneMatch) {
RequestOptions requestOptions = new RequestOptions();
if (clientTraceId != null) {
requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
}
if (scope != null) {
requestOptions.addQueryParam("scope", scope, false);
}
if (acceptLanguage != null) {
requestOptions.setHeader(HttpHeaderName.ACCEPT_LANGUAGE, acceptLanguage);
}
if (ifNoneMatch != null) {
requestOptions.setHeader(HttpHeaderName.IF_NONE_MATCH, ifNoneMatch);
}
return getSupportedLanguagesWithResponse(requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(GetSupportedLanguagesResult.class));
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param scopes List of names defining the group of languages to return.
* @param acceptLanguage The language to use for user interface strings. Some of the fields in the response are
* names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.
* @param ifNoneMatch Passing the value of the ETag response header in an If-None-Match field will allow the service
* to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response body.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<GetSupportedLanguagesResult> getSupportedLanguages(String clientTraceId, List<LanguageScope> scopes,
String acceptLanguage, String ifNoneMatch) {
return getSupportedLanguages(clientTraceId, convertToScopesString(scopes), acceptLanguage, ifNoneMatch);
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator on successful completion
* of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<GetSupportedLanguagesResult> getSupportedLanguages() {
RequestOptions requestOptions = new RequestOptions();
return getSupportedLanguagesWithResponse(requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(GetSupportedLanguagesResult.class));
}
private ArrayList<InputTextItem> convertTextToData(List<String> texts) {
ArrayList<InputTextItem> content = new ArrayList<>();
for (String text : texts) {
content.add(new InputTextItem(text));
}
return content;
}
private String convertToScopesString(List<LanguageScope> scopes) {
if (scopes == null) {
return null;
}
String result = "";
for (LanguageScope scope : scopes) {
if (!result.isEmpty()) {
result += ",";
}
result += scope.toString();
}
return result;
}
} | class TextTranslationAsyncClient {
    // Generated protocol-layer client that every operation on this class delegates to.
    @Generated
    private final TextTranslationClientImpl serviceClient;
/**
* Initializes an instance of TextTranslationAsyncClient class.
*
* @param serviceClient the service client implementation.
*/
    @Generated
    TextTranslationAsyncClient(TextTranslationClientImpl serviceClient) {
        // Package-private: instances are created by the client builder, not directly by users.
        this.serviceClient = serviceClient;
    }
/**
* Translate Text.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>from</td><td>String</td><td>No</td><td>Specifies the language of the input text. Find which languages are
* available to translate from by
* looking up supported languages using the translation scope. If the from parameter isn't specified,
* automatic language detection is applied to determine the source language.
*
* You must use the from parameter rather than autodetection when using the dynamic dictionary feature.
* Note: the dynamic dictionary feature is case-sensitive.</td></tr>
* <tr><td>textType</td><td>String</td><td>No</td><td>Defines whether the text being translated is plain text or
* HTML text. Any HTML needs to be a well-formed,
* complete element. Possible values are: plain (default) or html. Allowed values: "Plain", "Html".</td></tr>
* <tr><td>category</td><td>String</td><td>No</td><td>A string specifying the category (domain) of the translation.
* This parameter is used to get translations
* from a customized system built with Custom Translator. Add the Category ID from your Custom Translator
* project details to this parameter to use your deployed customized system. Default value is: general.</td></tr>
* <tr><td>profanityAction</td><td>String</td><td>No</td><td>Specifies how profanities should be treated in
* translations.
* Possible values are: NoAction (default), Marked or Deleted. Allowed values: "NoAction", "Marked",
* "Deleted".</td></tr>
* <tr><td>profanityMarker</td><td>String</td><td>No</td><td>Specifies how profanities should be marked in
* translations.
* Possible values are: Asterisk (default) or Tag. . Allowed values: "Asterisk", "Tag".</td></tr>
* <tr><td>includeAlignment</td><td>Boolean</td><td>No</td><td>Specifies whether to include alignment projection
* from source text to translated text.
* Possible values are: true or false (default).</td></tr>
* <tr><td>includeSentenceLength</td><td>Boolean</td><td>No</td><td>Specifies whether to include sentence boundaries
* for the input text and the translated text.
* Possible values are: true or false (default).</td></tr>
* <tr><td>suggestedFrom</td><td>String</td><td>No</td><td>Specifies a fallback language if the language of the
* input text can't be identified.
* Language autodetection is applied when the from parameter is omitted. If detection fails,
* the suggestedFrom language will be assumed.</td></tr>
* <tr><td>fromScript</td><td>String</td><td>No</td><td>Specifies the script of the input text.</td></tr>
* <tr><td>toScript</td><td>String</td><td>No</td><td>Specifies the script of the translated text.</td></tr>
* <tr><td>allowFallback</td><td>Boolean</td><td>No</td><td>Specifies that the service is allowed to fall back to a
* general system when a custom system doesn't exist.
* Possible values are: true (default) or false.
*
* allowFallback=false specifies that the translation should only use systems trained for the category specified
* by the request. If a translation for language X to language Y requires chaining through a pivot language E,
* then all the systems in the chain (X → E and E → Y) will need to be custom and have the same category.
* If no system is found with the specific category, the request will return a 400 status code. allowFallback=true
* specifies that the service is allowed to fall back to a general system when a custom system doesn't
* exist.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* detectedLanguage (Optional): {
* language: String (Required)
* score: double (Required)
* }
* translations (Required): [
* (Required){
* to: String (Required)
* text: String (Required)
* transliteration (Optional): {
* text: String (Required)
* script: String (Required)
* }
* alignment (Optional): {
* proj: String (Required)
* }
* sentLen (Optional): {
* srcSentLen (Required): [
* int (Required)
* ]
* transSentLen (Required): [
* int (Required)
* ]
* }
* }
* ]
* sourceText (Optional): {
* text: String (Required)
* }
* }
* ]
* }</pre>
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> translateWithResponse(List<String> targetLanguages, BinaryData body,
        RequestOptions requestOptions) {
        // Pure pass-through to the generated protocol-layer implementation client.
        return this.serviceClient.translateWithResponseAsync(targetLanguages, body, requestOptions);
    }
/**
* Transliterate Text.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* script: String (Required)
* }
* ]
* }</pre>
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> transliterateWithResponse(String language, String sourceLanguageScript,
        String targetLanguageScript, BinaryData body, RequestOptions requestOptions) {
        // Pure pass-through to the generated protocol-layer implementation client.
        return this.serviceClient.transliterateWithResponseAsync(language, sourceLanguageScript, targetLanguageScript,
            body, requestOptions);
    }
/**
* Find Sentence Boundaries.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>language</td><td>String</td><td>No</td><td>Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.</td></tr>
* <tr><td>script</td><td>String</td><td>No</td><td>Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* detectedLanguage (Optional): {
* language: String (Required)
* score: double (Required)
* }
* sentLen (Required): [
* int (Required)
* ]
* }
* ]
* }</pre>
*
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> findSentenceBoundariesWithResponse(BinaryData body,
        RequestOptions requestOptions) {
        // Pure pass-through to the generated protocol-layer implementation client.
        return this.serviceClient.findSentenceBoundariesWithResponseAsync(body, requestOptions);
    }
/**
* Lookup Dictionary Entries.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* normalizedSource: String (Required)
* displaySource: String (Required)
* translations (Required): [
* (Required){
* normalizedTarget: String (Required)
* displayTarget: String (Required)
* posTag: String (Required)
* confidence: double (Required)
* prefixWord: String (Required)
* backTranslations (Required): [
* (Required){
* normalizedText: String (Required)
* displayText: String (Required)
* numExamples: int (Required)
* frequencyCount: int (Required)
* }
* ]
* }
* ]
* }
* ]
* }</pre>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> lookupDictionaryEntriesWithResponse(String sourceLanguage, String targetLanguage,
        BinaryData body, RequestOptions requestOptions) {
        // Pure pass-through to the generated protocol-layer implementation client.
        return this.serviceClient.lookupDictionaryEntriesWithResponseAsync(sourceLanguage, targetLanguage, body,
            requestOptions);
    }
/**
* Lookup Dictionary Examples.
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Request Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* text: String (Required)
* translation: String (Required)
* }
* ]
* }</pre>
*
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* [
* (Required){
* normalizedSource: String (Required)
* normalizedTarget: String (Required)
* examples (Required): [
* (Required){
* sourcePrefix: String (Required)
* sourceTerm: String (Required)
* sourceSuffix: String (Required)
* targetPrefix: String (Required)
* targetTerm: String (Required)
* targetSuffix: String (Required)
* }
* ]
* }
* ]
* }</pre>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param body Defines the content of the request.
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the response body along with {@link Response} on successful completion of {@link Mono}.
*/
    @Generated
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BinaryData>> lookupDictionaryExamplesWithResponse(String sourceLanguage, String targetLanguage,
        BinaryData body, RequestOptions requestOptions) {
        // Pure pass-through to the generated protocol-layer implementation client.
        return this.serviceClient.lookupDictionaryExamplesWithResponseAsync(sourceLanguage, targetLanguage, body,
            requestOptions);
    }
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
 * For example, use to=de&amp;to=it to translate to German and Italian.
* @param body Defines the content of the request.
* @param clientTraceId A client-generated GUID to uniquely identify the request.
* @param sourceLanguage Specifies the language of the input text. Find which languages are available to translate
* from by
* looking up supported languages using the translation scope. If the from parameter isn't specified,
* automatic language detection is applied to determine the source language.
*
* You must use the from parameter rather than autodetection when using the dynamic dictionary feature.
* Note: the dynamic dictionary feature is case-sensitive.
* @param textType Defines whether the text being translated is plain text or HTML text. Any HTML needs to be a
* well-formed,
* complete element. Possible values are: plain (default) or html.
* @param category A string specifying the category (domain) of the translation. This parameter is used to get
* translations
* from a customized system built with Custom Translator. Add the Category ID from your Custom Translator
* project details to this parameter to use your deployed customized system. Default value is: general.
* @param profanityAction Specifies how profanities should be treated in translations.
* Possible values are: NoAction (default), Marked or Deleted.
* @param profanityMarker Specifies how profanities should be marked in translations.
* Possible values are: Asterisk (default) or Tag.
* @param includeAlignment Specifies whether to include alignment projection from source text to translated text.
* Possible values are: true or false (default).
* @param includeSentenceLength Specifies whether to include sentence boundaries for the input text and the
* translated text.
* Possible values are: true or false (default).
* @param suggestedSourceLanguage Specifies a fallback language if the language of the input text can't be
* identified.
* Language autodetection is applied when the from parameter is omitted. If detection fails,
* the suggestedFrom language will be assumed.
* @param sourceLanguageScript Specifies the script of the input text.
* @param targetLanguageScript Specifies the script of the translated text.
* @param allowFallback Specifies that the service is allowed to fall back to a general system when a custom system
* doesn't exist.
* Possible values are: true (default) or false.
*
* allowFallback=false specifies that the translation should only use systems trained for the category specified
* by the request. If a translation for language X to language Y requires chaining through a pivot language E,
* then all the systems in the chain (X → E and E → Y) will need to be custom and have the same category.
* If no system is found with the specific category, the request will return a 400 status code. allowFallback=true
* specifies that the service is allowed to fall back to a general system when a custom system doesn't exist.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
    private Mono<List<TranslatedTextItem>> translate(List<String> targetLanguages, List<InputTextItem> body,
        String clientTraceId, String sourceLanguage, TextType textType, String category,
        ProfanityAction profanityAction, ProfanityMarker profanityMarker, Boolean includeAlignment,
        Boolean includeSentenceLength, String suggestedSourceLanguage, String sourceLanguageScript,
        String targetLanguageScript, Boolean allowFallback) {
        RequestOptions requestOptions = new RequestOptions();
        // Only caller-supplied options are forwarded; null values are omitted so the
        // service applies its own defaults. The parameter order below is fixed on purpose
        // (it determines the order of query parameters on the wire).
        if (clientTraceId != null) {
            requestOptions.setHeader(HttpHeaderName.fromString("X-ClientTraceId"), clientTraceId);
        }
        if (sourceLanguage != null) {
            requestOptions.addQueryParam("from", sourceLanguage, false);
        }
        if (textType != null) {
            requestOptions.addQueryParam("textType", textType.toString(), false);
        }
        if (category != null) {
            requestOptions.addQueryParam("category", category, false);
        }
        if (profanityAction != null) {
            requestOptions.addQueryParam("profanityAction", profanityAction.toString(), false);
        }
        if (profanityMarker != null) {
            requestOptions.addQueryParam("profanityMarker", profanityMarker.toString(), false);
        }
        if (includeAlignment != null) {
            requestOptions.addQueryParam("includeAlignment", String.valueOf(includeAlignment), false);
        }
        if (includeSentenceLength != null) {
            requestOptions.addQueryParam("includeSentenceLength", String.valueOf(includeSentenceLength), false);
        }
        if (suggestedSourceLanguage != null) {
            requestOptions.addQueryParam("suggestedFrom", suggestedSourceLanguage, false);
        }
        if (sourceLanguageScript != null) {
            requestOptions.addQueryParam("fromScript", sourceLanguageScript, false);
        }
        if (targetLanguageScript != null) {
            requestOptions.addQueryParam("toScript", targetLanguageScript, false);
        }
        if (allowFallback != null) {
            requestOptions.addQueryParam("allowFallback", String.valueOf(allowFallback), false);
        }
        // Serialize the input items to JSON and decode the protocol response into typed items.
        return translateWithResponse(targetLanguages, BinaryData.fromObject(body), requestOptions)
            .flatMap(FluxUtil::toMono)
            .map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM));
    }
/**
* Translate Text.
*
* @param targetLanguages Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<TranslatedTextItem>> translateInner(List<String> targetLanguages, List<InputTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return translateWithResponse(targetLanguages, BinaryData.fromObject(body), requestOptions)
.flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM));
}
/**
* Translate Text.
* <p>
* This method is used when you have single target language and multiple texts to translate.
* </p>
*
* @param targetLanguage Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
 * For example, use to=de&amp;to=it to translate to German and Italian.
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(String targetLanguage, List<String> texts) {
return translateInner(Arrays.asList(targetLanguage), convertTextToData(texts));
}
/**
* Translate Text.
* <p>
* This method is used when you have single target language and single text to translate.
* </p>
*
* @param targetLanguage Specifies the language of the output text. The target language must be one of the
* supported languages included
* in the translation scope. For example, use to=de to translate to German.
* It's possible to translate to multiple languages simultaneously by repeating the parameter in the query string.
* For example, use to=de&to=it to translate to German and Italian.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TranslatedTextItem> translate(String targetLanguage, String text) {
return translate(targetLanguage, Arrays.asList(text))
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Translate Text.
* <p>
* This method is used when you have one input text and the optional parameters are needed such as specification
* of a source language, profanity handling etc.
* </p>
*
* @param text Text to translate.
* @param translateOptions Translate Options.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TranslatedTextItem> translate(String text, TranslateOptions translateOptions) {
return translate(Arrays.asList(text), translateOptions)
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Translate Text.
* <p>
* This method is used when you have multiple texts and the optional parameters are needed such as specification
* of a source language, profanity handling etc..
* </p>
*
* @param texts List of text to translate.
* @param translateOptions Translate Options.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<TranslatedTextItem>> translate(List<String> texts, TranslateOptions translateOptions) {
List<InputTextItem> content = new ArrayList<>();
for (String text : texts) {
content.add(.
*/
private Mono<List<TransliteratedText>> transliterateInner(String language, String sourceLanguageScript,
String targetLanguageScript, List<InputTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return transliterateWithResponse(language, sourceLanguageScript, targetLanguageScript,
BinaryData.fromObject(body), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT));
}
    /**
     * Transliterate Text.
     * <p>
     * This method is used when you have multiple texts to transliterate.
     * </p>
     *
     * @param language Specifies the language of the text to convert from one script to another.
     * Possible languages are listed in the transliteration scope obtained by querying the service
     * for its supported languages.
     * @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
     * transliteration scope,
     * to find input scripts available for the selected language.
     * @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
     * scope, to find output
     * scripts available for the selected combination of input language and input script.
     * @param body Defines the content of the request.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<List<TransliteratedText>> transliterate(String language, String sourceLanguageScript,
        String targetLanguageScript, List<String> body) {
        return transliterateInner(language, sourceLanguageScript, targetLanguageScript, convertTextToData(body));
    }
/**
* Transliterate Text.
* <p>
* This method is used when you have single text to transliterate and you want to provide client trace id.
* </p>
*
* @param language Specifies the language of the text to convert from one script to another.
* Possible languages are listed in the transliteration scope obtained by querying the service
* for its supported languages.
* @param sourceLanguageScript Specifies the script used by the input text. Look up supported languages using the
* transliteration scope,
* to find input scripts available for the selected language.
* @param targetLanguageScript Specifies the output script. Look up supported languages using the transliteration
* scope, to find output
* scripts available for the selected combination of input language and input script.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TransliteratedText> transliterate(String language, String sourceLanguageScript,
String targetLanguageScript, String text) {
return transliterate(language, sourceLanguageScript, targetLanguageScript, Arrays.asList(text))
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Find Sentence Boundaries.
*
* @param body Defines the content of the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<BreakSentenceItem>> findSentenceBoundariesInner(List<InputTextItem> body, String language,
String script) {
RequestOptions requestOptions = new RequestOptions();
if (language != null) {
requestOptions.addQueryParam("language", language, false);
}
if (script != null) {
requestOptions.addQueryParam("script", script, false);
}
return findSentenceBoundariesWithResponse(BinaryData.fromObject(body), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM));
}
    /**
     * Find Sentence Boundaries.
     * <p>
     * This method is used when you have multiple texts for which you want to find sentence boundaries.
     * </p>
     *
     * @param texts Defines the content of the request.
     * @param language Language tag identifying the language of the input text.
     * If a code isn't specified, automatic language detection will be applied.
     * @param script Script tag identifying the script used by the input text.
     * If a script isn't specified, the default script of the language will be assumed.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<List<BreakSentenceItem>> findSentenceBoundaries(List<String> texts, String language, String script) {
        return findSentenceBoundariesInner(convertTextToData(texts), language, script);
    }
/**
* Find Sentence Boundaries.
* <p>
* This method is used when you have single text for which you want to find sentence boundaries and you want to
* provide
* client trace id.
* </p>
*
* @param text Defines the content of the request.
* @param language Language tag identifying the language of the input text.
* If a code isn't specified, automatic language detection will be applied.
* @param script Script tag identifying the script used by the input text.
* If a script isn't specified, the default script of the language will be assumed.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BreakSentenceItem> findSentenceBoundaries(String text, String language, String script) {
return findSentenceBoundaries(Arrays.asList(text), language, script)
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Find Sentence Boundaries.
*
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<BreakSentenceItem>> findSentenceBoundariesInner(List<InputTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return findSentenceBoundariesWithResponse(BinaryData.fromObject(body), requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM));
}
/**
* Find Sentence Boundaries.
* <p>
* This method is used when you have multiple texts for which you want to find sentence boundaries and you want
* the source language to be auto-detected by the service.
* </p>
*
* @param texts Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<List<BreakSentenceItem>> findSentenceBoundaries(List<String> texts) {
return findSentenceBoundariesInner(convertTextToData(texts));
}
/**
* Find Sentence Boundaries.
* <p>
* This method is used when you have single text for which you want to find sentence boundaries and you want
* the source language to be auto-detected by the service.
* </p>
*
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BreakSentenceItem> findSentenceBoundaries(String text) {
return findSentenceBoundaries(Arrays.asList(text))
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Lookup Dictionary Entries.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
private Mono<List<DictionaryLookupItem>> lookupDictionaryEntriesInner(String sourceLanguage, String targetLanguage,
List<InputTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return lookupDictionaryEntriesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(body),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM));
}
    /**
     * Lookup Dictionary Entries.
     * <p>
     * This method is used when you want to look up multiple entries in the dictionary.
     * </p>
     *
     * @param sourceLanguage Specifies the language of the input text.
     * The source language must be one of the supported languages included in the dictionary scope.
     * @param targetLanguage Specifies the language of the output text.
     * The target language must be one of the supported languages included in the dictionary scope.
     * @param texts Defines the content of the request.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<List<DictionaryLookupItem>> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
        List<String> texts) {
        return lookupDictionaryEntriesInner(sourceLanguage, targetLanguage, convertTextToData(texts));
    }
/**
* Lookup Dictionary Entries.
* <p>
* This method is used when you want lookup single entry in the dictionary and you want to provide
* client trace id.
* </p>
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param text Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DictionaryLookupItem> lookupDictionaryEntries(String sourceLanguage, String targetLanguage,
String text) {
return lookupDictionaryEntries(sourceLanguage, targetLanguage, Arrays.asList(text))
.map(translatedTextItems -> translatedTextItems.isEmpty() ? null : translatedTextItems.get(0))
.defaultIfEmpty(null);
}
/**
* Lookup Dictionary Examples.
*
* @param sourceLanguage Specifies the language of the input text.
* The source language must be one of the supported languages included in the dictionary scope.
* @param targetLanguage Specifies the language of the output text.
* The target language must be one of the supported languages included in the dictionary scope.
* @param body Defines the content of the request.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response body on successful completion of {@link Mono}.
*/
public Mono<List<DictionaryExampleItem>> lookupDictionaryExamples(String sourceLanguage, String targetLanguage,
List<DictionaryExampleTextItem> body) {
RequestOptions requestOptions = new RequestOptions();
return lookupDictionaryExamplesWithResponse(sourceLanguage, targetLanguage, BinaryData.fromObject(body),
requestOptions).flatMap(FluxUtil::toMono)
.map(protocolMethodData -> protocolMethodData.toObject(TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM));
}
    // Cached TypeReference instances used to deserialize BinaryData payloads into typed lists;
    // created once because anonymous TypeReference subclasses capture their generic type.
    @Generated
    private static final TypeReference<List<TransliteratedText>> TYPE_REFERENCE_LIST_TRANSLITERATED_TEXT
        = new TypeReference<List<TransliteratedText>>() {
        };
    @Generated
    private static final TypeReference<List<BreakSentenceItem>> TYPE_REFERENCE_LIST_BREAK_SENTENCE_ITEM
        = new TypeReference<List<BreakSentenceItem>>() {
        };
    @Generated
    private static final TypeReference<List<DictionaryLookupItem>> TYPE_REFERENCE_LIST_DICTIONARY_LOOKUP_ITEM
        = new TypeReference<List<DictionaryLookupItem>>() {
        };
    @Generated
    private static final TypeReference<List<DictionaryExampleItem>> TYPE_REFERENCE_LIST_DICTIONARY_EXAMPLE_ITEM
        = new TypeReference<List<DictionaryExampleItem>>() {
        };
    @Generated
    private static final TypeReference<List<TranslatedTextItem>> TYPE_REFERENCE_LIST_TRANSLATED_TEXT_ITEM
        = new TypeReference<List<TranslatedTextItem>>() {
        };
/**
* Gets the set of languages currently supported by other operations of the Translator.
* <p><strong>Query Parameters</strong></p>
* <table border="1">
* <caption>Query Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>scope</td><td>String</td><td>No</td><td>A comma-separated list of names defining the group of languages
* to return.
* Allowed group names are: `translation`, `transliteration` and `dictionary`.
* If no scope is given, then all groups are returned, which is equivalent to passing
* `scope=translation,transliteration,dictionary`. To decide which set of supported languages
* is appropriate for your scenario, see the description of the [response object](
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Header Parameters</strong></p>
* <table border="1">
* <caption>Header Parameters</caption>
* <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
* <tr><td>X-ClientTraceId</td><td>String</td><td>No</td><td>A client-generated GUID to uniquely identify the
* request.</td></tr>
* <tr><td>Accept-Language</td><td>String</td><td>No</td><td>The language to use for user interface strings. Some of
* the fields in the response are names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.</td></tr>
* <tr><td>If-None-Match</td><td>String</td><td>No</td><td>Passing the value of the ETag response header in an
* If-None-Match field will allow the service to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response
* body.</td></tr>
* </table>
* You can add these to a request with {@link RequestOptions
* <p><strong>Response Body Schema</strong></p>
*
* <pre>{@code
* {
* translation (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* }
* }
* transliteration (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* scripts (Required): [
* (Required){
* code: String (Required)
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* toScripts (Required): [
* (Required){
* code: String (Required)
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* }
* ]
* }
* ]
* }
* }
* dictionary (Optional): {
* String (Required): {
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* translations (Required): [
* (Required){
* name: String (Required)
* nativeName: String (Required)
* dir: String(ltr/rtl) (Required)
* code: String (Required)
* }
* ]
* }
* }
* }
* }</pre>
*
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @return the set of languages currently supported by other operations of the Translator along with
* {@link Response} on successful completion of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BinaryData>> getSupportedLanguagesWithResponse(RequestOptions requestOptions) {
    // Protocol method: delegates to the generated service client; callers deserialize
    // the raw BinaryData payload themselves (see the typed convenience overloads).
    return this.serviceClient.getSupportedLanguagesWithResponseAsync(requestOptions);
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @param scope A comma-separated list of names defining the group of languages to return.
* Allowed group names are: `translation`, `transliteration` and `dictionary`.
* If no scope is given, then all groups are returned, which is equivalent to passing
* `scope=translation,transliteration,dictionary`. To decide which set of supported languages
* is appropriate for your scenario, see the description of the [response object](
* @param acceptLanguage The language to use for user interface strings. Some of the fields in the response are
* names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.
* @param ifNoneMatch Passing the value of the ETag response header in an If-None-Match field will allow the service
* to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response body.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator on successful completion
* of {@link Mono}.
*/
/**
 * Gets the set of languages currently supported by other operations of the Translator,
 * applying the optional scope query parameter and optional headers before calling the
 * protocol method.
 *
 * @param scope comma-separated group names (`translation`, `transliteration`, `dictionary`); may be null.
 * @param acceptLanguage BCP 47 tag selecting the language of returned names; may be null.
 * @param ifNoneMatch ETag value enabling a 304 short-circuit; may be null.
 * @return the supported languages on successful completion of {@link Mono}.
 */
private Mono<GetSupportedLanguagesResult> getSupportedLanguages(String scope, String acceptLanguage,
    String ifNoneMatch) {
    RequestOptions options = new RequestOptions();
    // Each parameter is optional; only attach the ones the caller supplied.
    if (scope != null) {
        options.addQueryParam("scope", scope, false);
    }
    if (acceptLanguage != null) {
        options.setHeader(HttpHeaderName.ACCEPT_LANGUAGE, acceptLanguage);
    }
    if (ifNoneMatch != null) {
        options.setHeader(HttpHeaderName.IF_NONE_MATCH, ifNoneMatch);
    }
    return getSupportedLanguagesWithResponse(options).flatMap(FluxUtil::toMono)
        .map(payload -> payload.toObject(GetSupportedLanguagesResult.class));
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @param scopes List of names defining the group of languages to return.
* @param acceptLanguage The language to use for user interface strings. Some of the fields in the response are
* names of languages or
* names of regions. Use this parameter to define the language in which these names are returned.
* The language is specified by providing a well-formed BCP 47 language tag. For instance, use the value `fr`
* to request names in French or use the value `zh-Hant` to request names in Chinese Traditional.
* Names are provided in the English language when a target language is not specified or when localization
* is not available.
* @param ifNoneMatch Passing the value of the ETag response header in an If-None-Match field will allow the service
* to optimize the response.
* If the resource has not been modified, the service will return status code 304 and an empty response body.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<GetSupportedLanguagesResult> getSupportedLanguages(List<LanguageScope> scopes, String acceptLanguage,
    String ifNoneMatch) {
    // Convenience overload: converts the typed scope list into the comma-separated
    // string form expected by the `scope` query parameter.
    return getSupportedLanguages(convertToScopesString(scopes), acceptLanguage, ifNoneMatch);
}
/**
* Gets the set of languages currently supported by other operations of the Translator.
*
* @throws HttpResponseException thrown if the request is rejected by server.
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the set of languages currently supported by other operations of the Translator on successful completion
* of {@link Mono}.
*/
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<GetSupportedLanguagesResult> getSupportedLanguages() {
    // Generated convenience overload: sends the request with no query parameters
    // or optional headers and deserializes the full result.
    RequestOptions requestOptions = new RequestOptions();
    return getSupportedLanguagesWithResponse(requestOptions).flatMap(FluxUtil::toMono)
        .map(protocolMethodData -> protocolMethodData.toObject(GetSupportedLanguagesResult.class));
}
/**
 * Wraps each raw input string in an {@code InputTextItem} as required by the service request body.
 *
 * @param texts the input strings; must not be null.
 * @return a list of wrappers in the same order as {@code texts}.
 */
private List<InputTextItem> convertTextToData(List<String> texts) {
    // Presize to the known element count to avoid intermediate resizes on large batches.
    List<InputTextItem> content = new ArrayList<>(texts.size());
    for (String text : texts) {
        content.add(new InputTextItem(text));
    }
    return content;
}
/**
 * Joins the given language scopes into the comma-separated string expected by the
 * {@code scope} query parameter.
 *
 * @param scopes the scopes to join; may be null.
 * @return the comma-separated scope names (empty string for an empty list), or null when
 * {@code scopes} is null.
 */
private String convertToScopesString(List<LanguageScope> scopes) {
    if (scopes == null) {
        return null;
    }
    // StringBuilder avoids the quadratic cost of repeated String concatenation in a loop.
    StringBuilder result = new StringBuilder();
    for (LanguageScope scope : scopes) {
        if (result.length() > 0) {
            result.append(',');
        }
        result.append(scope.toString());
    }
    return result.toString();
}
} |
Closing is better than reading to the end in edge cases: we avoid wasting time/resources reading an (unlikely) large response, and if a transient issue occurs during stream reading, we don't need to handle or retry it. | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> {
if (authorized) {
httpResponse.close();
return nextPolicy.process();
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
} | } | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> {
if (authorized) {
httpResponse.close();
return nextPolicy.process();
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
} | class BearerTokenAuthenticationPolicy implements HttpPipelinePolicy {
private static final ClientLogger LOGGER = new ClientLogger(BearerTokenAuthenticationPolicy.class);
private static final String BEARER = "Bearer";
private final String[] scopes;
private final AccessTokenCache cache;
/**
* Creates BearerTokenAuthenticationPolicy.
*
* @param credential the token credential to authenticate the request
* @param scopes the scopes of authentication the credential should get token for
*/
public BearerTokenAuthenticationPolicy(TokenCredential credential, String... scopes) {
Objects.requireNonNull(credential);
this.scopes = scopes;
this.cache = new AccessTokenCache(credential);
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
    // No scopes configured: nothing to request a token for, so skip header injection.
    if (this.scopes == null) {
        return Mono.empty();
    }
    // Fetch a (possibly cached) token for the configured scopes without forcing a refresh.
    return setAuthorizationHeaderHelper(context, new TokenRequestContext().addScopes(this.scopes), false);
}
/**
* Synchronously executed before sending the initial request and authenticates the request.
*
* @param context The request context.
*/
public void authorizeRequestSync(HttpPipelineCallContext context) {
setAuthorizationHeaderHelperSync(context, new TokenRequestContext().addScopes(scopes), false);
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication challenge
* header is received after the initial request and returns appropriate {@link TokenRequestContext} to be used for
* re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link TokenRequestContext}
*/
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.just(false);
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication challenge
* header is received after the initial request and returns appropriate {@link TokenRequestContext} to be used for
* re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A boolean indicating if containing the {@link TokenRequestContext} for re-authentication
*/
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
return false;
}
/**
 * Authorizes the outgoing request, sends it, and retries exactly once when the service
 * answers 401 with a WWW-Authenticate challenge that {@link #authorizeRequestOnChallenge}
 * accepts.
 *
 * @param context The request context.
 * @param next The next policy in the pipeline.
 * @return A {@link Mono} emitting the final {@link HttpResponse}.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
        return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
    }
    // Clone before the first send so the retry re-enters the pipeline from this position.
    HttpPipelineNextPolicy nextPolicy = next.clone();
    return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> {
        String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE);
        if (httpResponse.getStatusCode() == 401 && authHeader != null) {
            return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> {
                if (authorized) {
                    // Close the challenge response before retrying: releases the connection
                    // without reading a possibly large body to the end.
                    httpResponse.close();
                    return nextPolicy.process();
                } else {
                    return Mono.just(httpResponse);
                }
            });
        }
        return Mono.just(httpResponse);
    });
}
/**
 * Synchronous variant of {@link #process}: authorizes, sends, and retries once on an
 * accepted 401 WWW-Authenticate challenge.
 *
 * @param context The request context.
 * @param next The next synchronous policy in the pipeline.
 * @return the final {@link HttpResponse}.
 */
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
        throw LOGGER.logExceptionAsError(
            new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
    }
    HttpPipelineNextSyncPolicy nextPolicy = next.clone();
    authorizeRequestSync(context);
    HttpResponse httpResponse = next.processSync();
    String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE);
    if (httpResponse.getStatusCode() == 401 && authHeader != null) {
        if (authorizeRequestOnChallengeSync(context, httpResponse)) {
            // Close the challenge response before retrying (see async variant).
            httpResponse.close();
            return nextPolicy.processSync();
        } else {
            return httpResponse;
        }
    }
    return httpResponse;
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return setAuthorizationHeaderHelper(context, tokenRequestContext, true);
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
*/
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
setAuthorizationHeaderHelperSync(context, tokenRequestContext, true);
}
// Acquires a token from the cache (optionally forcing a refresh) and stamps the
// Authorization header onto the outgoing request.
private Mono<Void> setAuthorizationHeaderHelper(HttpPipelineCallContext context,
    TokenRequestContext tokenRequestContext, boolean checkToForceFetchToken) {
    return cache.getToken(tokenRequestContext, checkToForceFetchToken).flatMap(token -> {
        setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
        // Header set as a side effect; nothing to emit.
        return Mono.empty();
    });
}
private void setAuthorizationHeaderHelperSync(HttpPipelineCallContext context,
TokenRequestContext tokenRequestContext, boolean checkToForceFetchToken) {
AccessToken token = cache.getTokenSync(tokenRequestContext, checkToForceFetchToken);
setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
}
// Sets "Authorization: Bearer <token>" on the given headers, replacing any existing value.
private static void setAuthorizationHeader(HttpHeaders headers, String token) {
    headers.set(HttpHeaderName.AUTHORIZATION, BEARER + " " + token);
}
} | class BearerTokenAuthenticationPolicy implements HttpPipelinePolicy {
private static final ClientLogger LOGGER = new ClientLogger(BearerTokenAuthenticationPolicy.class);
private static final String BEARER = "Bearer";
private final String[] scopes;
private final AccessTokenCache cache;
/**
* Creates BearerTokenAuthenticationPolicy.
*
* @param credential the token credential to authenticate the request
* @param scopes the scopes of authentication the credential should get token for
*/
public BearerTokenAuthenticationPolicy(TokenCredential credential, String... scopes) {
Objects.requireNonNull(credential);
this.scopes = scopes;
this.cache = new AccessTokenCache(credential);
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
if (this.scopes == null) {
return Mono.empty();
}
return setAuthorizationHeaderHelper(context, new TokenRequestContext().addScopes(this.scopes), false);
}
/**
* Synchronously executed before sending the initial request and authenticates the request.
*
* @param context The request context.
*/
public void authorizeRequestSync(HttpPipelineCallContext context) {
setAuthorizationHeaderHelperSync(context, new TokenRequestContext().addScopes(scopes), false);
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication challenge
* header is received after the initial request and returns appropriate {@link TokenRequestContext} to be used for
* re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link TokenRequestContext}
*/
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.just(false);
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication challenge
* header is received after the initial request and returns appropriate {@link TokenRequestContext} to be used for
* re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A boolean indicating if containing the {@link TokenRequestContext} for re-authentication
*/
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
return false;
}
/**
 * Authorizes the outgoing request, sends it, and retries exactly once when the service
 * answers 401 with a WWW-Authenticate challenge that {@link #authorizeRequestOnChallenge}
 * accepts.
 *
 * @param context The request context.
 * @param next The next policy in the pipeline.
 * @return A {@link Mono} emitting the final {@link HttpResponse}.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
        return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
    }
    // Clone before the first send so the retry re-enters the pipeline from this position.
    HttpPipelineNextPolicy nextPolicy = next.clone();
    return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> {
        String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE);
        if (httpResponse.getStatusCode() == 401 && authHeader != null) {
            return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> {
                if (authorized) {
                    // Close the challenge response before retrying: releases the connection
                    // without reading a possibly large body to the end.
                    httpResponse.close();
                    return nextPolicy.process();
                } else {
                    return Mono.just(httpResponse);
                }
            });
        }
        return Mono.just(httpResponse);
    });
}
/**
 * Synchronous variant of {@link #process}: authorizes, sends, and retries once on an
 * accepted 401 WWW-Authenticate challenge.
 *
 * @param context The request context.
 * @param next The next synchronous policy in the pipeline.
 * @return the final {@link HttpResponse}.
 */
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
        throw LOGGER.logExceptionAsError(
            new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
    }
    HttpPipelineNextSyncPolicy nextPolicy = next.clone();
    authorizeRequestSync(context);
    HttpResponse httpResponse = next.processSync();
    String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE);
    if (httpResponse.getStatusCode() == 401 && authHeader != null) {
        if (authorizeRequestOnChallengeSync(context, httpResponse)) {
            // Close the challenge response before retrying (see async variant).
            httpResponse.close();
            return nextPolicy.processSync();
        } else {
            return httpResponse;
        }
    }
    return httpResponse;
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return setAuthorizationHeaderHelper(context, tokenRequestContext, true);
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
*/
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
setAuthorizationHeaderHelperSync(context, tokenRequestContext, true);
}
private Mono<Void> setAuthorizationHeaderHelper(HttpPipelineCallContext context,
TokenRequestContext tokenRequestContext, boolean checkToForceFetchToken) {
return cache.getToken(tokenRequestContext, checkToForceFetchToken).flatMap(token -> {
setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
return Mono.empty();
});
}
private void setAuthorizationHeaderHelperSync(HttpPipelineCallContext context,
TokenRequestContext tokenRequestContext, boolean checkToForceFetchToken) {
AccessToken token = cache.getTokenSync(tokenRequestContext, checkToForceFetchToken);
setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
}
private static void setAuthorizationHeader(HttpHeaders headers, String token) {
headers.set(HttpHeaderName.AUTHORIZATION, BEARER + " " + token);
}
} |
Do we want to use defensive copying / return an immutable copy? This would force downstream applications to use `addOperationPolicy`. | List<CosmosOperationPolicy> getOperationPolicies() {
return requestPolicies;
} | return requestPolicies; | List<CosmosOperationPolicy> getOperationPolicies() {
return UnmodifiableList.unmodifiableList(this.requestPolicies);
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
private Configs configs = new Configs();
private String serviceEndpoint;
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
private List<String> preferredRegions;
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
private Boolean clientTelemetryEnabledOverride = null;
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
* Instantiates a new Cosmos client builder.
*/
public CosmosClientBuilder() {
    // Direct connectivity with default settings is the out-of-the-box connection mode.
    this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
    this.userAgentSuffix = "";
    this.throttlingRetryOptions = new ThrottlingRetryOptions();
    this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
    // Initializes the non-idempotent write retry policy to its default (disabled) state.
    this.resetNonIdempotentWriteRetryPolicy();
    this.requestPolicies = new LinkedList<>();
}
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
this.state = metadataCachesSnapshot;
return this;
}
CosmosClientMetadataCachesSnapshot metadataCaches() {
return this.state;
}
/**
* Sets a {@code boolean} flag to reduce the frequency of retries when the client
* strives to meet Session Consistency guarantees for operations
* that can be scoped to a single logical partition. Read your writes for a given logical partition
* should see higher stickiness to regions where the logical partition was written to prior or saw requests in
* thus reducing unnecessary cross-region retries. Reduction of retries would reduce CPU utilization spikes on VMs
* where the client is deployed along with latency savings through reduction of cross-region calls.
*
* <p>
* DISCLAIMER: Setting the {@link CosmosClientBuilder
* will impact all operations executed through this instance of the client provided that
* both the operation and the account support multi-region writes.
* </p>
* <p>
* Setting {@link CosmosClientBuilder
* ensure to maintain a singleton instance of {@link CosmosClient} or {@link CosmosAsyncClient}.
* </p>
*
* Operations supported:
* <ul>
* <li>Read</li>
* <li>Create</li>
* <li>Upsert</li>
* <li>Delete</li>
* <li>Replace</li>
* <li>Batch</li>
* <li>Patch</li>
* <li>Query when scoped to a single logical partition by specifying {@code PartitionKey} with {@link com.azure.cosmos.models.CosmosQueryRequestOptions}</li>
* <li>Change feed when scoped to a single logical partition by using {@code FeedRange.forLogicalPartition()} with {@link com.azure.cosmos.models.CosmosChangeFeedRequestOptions}</li>
* </ul>
*
* <p>
* NOTE: Bulk operations are not supported.
* </p>
*
* @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
* @return current {@link CosmosClientBuilder}
* */
CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
return this;
}
/**
* Gets the {@code boolean} flag {@link CosmosClientBuilder
*
* @return isRegionScopedSessionCapturingEnabled A {@code boolean} flag
* */
boolean isRegionScopedSessionCapturingEnabled() {
return this.isRegionScopedSessionCapturingEnabled;
}
/**
* Sets an apiType for the builder.
* @param apiType
* @return current cosmosClientBuilder
*/
CosmosClientBuilder setApiType(ApiType apiType){
this.apiType = apiType;
return this;
}
/**
 * Adds a policy for modifying request options dynamically.
 *
 * @param policy the policy to add; must not be null.
 * @return current cosmosClientBuilder
 * @throws NullPointerException if {@code policy} is null.
 */
public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
    // Fail fast on null, consistent with the other builder setters (endpoint, key, credential).
    this.requestPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
    return this;
}
/**
* Returns apiType for the Builder.
* @return
*/
ApiType apiType(){ return this.apiType; }
/**
* Session capturing is enabled by default for {@link ConsistencyLevel
* For other consistency levels, it is not needed, unless if you need occasionally send requests with Session
* Consistency while the client is not configured in session.
* <p>
* enabling Session capturing for Session mode has no effect.
* @param sessionCapturingOverrideEnabled session capturing override
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
return this;
}
/**
* Indicates if Session capturing is enabled for non Session modes.
* The default is false.
*
* @return the session capturing override
*/
boolean isSessionCapturingOverrideEnabled() {
return this.sessionCapturingOverrideEnabled;
}
/**
* Enables connections sharing across multiple Cosmos Clients. The default is false.
* <br/>
* <br/>
* <pre>
* {@code
* CosmosAsyncClient client1 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint1)
* .key(key1)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
* CosmosAsyncClient client2 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint2)
* .key(key2)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
*
* }
* </pre>
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout
* config) of the first instantiated client will be used for all other client instances.
* <br/>
* @param connectionSharingAcrossClientsEnabled connection sharing
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
return this;
}
/**
* Indicates whether connection sharing is enabled. The default is false.
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* @return the connection sharing across multiple clients
*/
boolean isConnectionSharingAcrossClientsEnabled() {
return this.connectionSharingAcrossClientsEnabled;
}
/**
* Gets the token resolver
* <br/>
* @return the token resolver
*/
CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
/**
* Sets the token resolver
*
* @param cosmosAuthorizationTokenResolver the token resolver
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder authorizationTokenResolver(
    CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
    this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
        "'cosmosAuthorizationTokenResolver' cannot be null.");
    // Auth mechanisms are mutually exclusive: clear every previously configured credential.
    this.keyOrResourceToken = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/**
* Gets the Azure Cosmos DB endpoint the SDK will connect to
*
* @return the endpoint
*/
String getEndpoint() {
return serviceEndpoint;
}
/**
* Sets the Azure Cosmos DB endpoint the SDK will connect to
*
* @param endpoint the service endpoint
* @return current Builder
*/
@Override
public CosmosClientBuilder endpoint(String endpoint) {
this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Gets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @return the key
*/
String getKey() {
return keyOrResourceToken;
}
/**
 * Sets a master or readonly key used for authentication. Selecting this credential
 * form clears every other previously configured credential.
 *
 * @param key master or readonly key; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder key(String key) {
    this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
    // Credential forms are mutually exclusive - drop all others.
    this.tokenCredential = null;
    this.credential = null;
    this.permissions = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Returns the resource token used for authentication (stored in the same field as the key).
 *
 * @return the resourceToken
 */
String getResourceToken() {
    return this.keyOrResourceToken;
}
/**
 * Sets a resource token used for authentication. Selecting this credential form
 * clears every other previously configured credential.
 *
 * @param resourceToken resourceToken for authentication; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder resourceToken(String resourceToken) {
    this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
    // Credential forms are mutually exclusive - drop all others.
    this.tokenCredential = null;
    this.credential = null;
    this.permissions = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Returns the token credential used for authentication, if any.
 *
 * @return the token credential.
 */
TokenCredential getTokenCredential() {
    return this.tokenCredential;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service.
 * Refer to the Azure SDK for Java identity documentation for proper usage of the
 * {@link TokenCredential} type. Selecting this credential form clears every other
 * previously configured credential.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated CosmosClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Credential forms are mutually exclusive - drop all others.
    this.credential = null;
    this.permissions = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.keyOrResourceToken = null;
    return this;
}
/**
 * Returns the permission list holding the resource tokens needed to access resources.
 *
 * @return the permission list
 */
List<CosmosPermissionProperties> getPermissions() {
    return this.permissions;
}
/**
 * Sets the permission list holding the resource tokens needed to access resources.
 * Selecting this credential form clears every other previously configured credential.
 *
 * @param permissions Permission list for authentication; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
    this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
    // Credential forms are mutually exclusive - drop all others.
    this.tokenCredential = null;
    this.credential = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.keyOrResourceToken = null;
    return this;
}
/**
 * Returns the {@link ConsistencyLevel} the client will request.
 *
 * @return the consistency level
 */
ConsistencyLevel getConsistencyLevel() {
    return desiredConsistencyLevel;
}
/**
 * Sets the {@link ConsistencyLevel} to be used by the client.
 *
 * @param desiredConsistencyLevel {@link ConsistencyLevel}
 * @return current Builder
 */
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
    this.desiredConsistencyLevel = desiredConsistencyLevel;
    return this;
}
/**
 * Returns the {@link ConnectionPolicy} to be used.
 *
 * @return the connection policy
 */
ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/**
 * Returns the {@link AzureKeyCredential} to be used, if any.
 *
 * @return {@link AzureKeyCredential}
 */
AzureKeyCredential getCredential() {
    return this.credential;
}
/**
 * Returns the {@link CosmosContainerProactiveInitConfig} to be used, if any.
 *
 * @return {@link CosmosContainerProactiveInitConfig}
 */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
    return this.proactiveContainerInitConfig;
}
/**
 * Sets the {@link AzureKeyCredential} to be used for authentication. Selecting this
 * credential form clears every other previously configured credential.
 *
 * @param credential {@link AzureKeyCredential}; must not be null
 * @return current cosmosClientBuilder
 */
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
    this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
    // Credential forms are mutually exclusive - drop all others.
    this.tokenCredential = null;
    this.permissions = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.keyOrResourceToken = null;
    return this;
}
/**
 * Indicates whether Create/Update/Delete responses for CosmosItem carry the full
 * payload or only headers and status code.
 * <br/>
 * When false (the default), the service omits the payload from the response, which
 * saves network bandwidth and client-side CPU spent on serialization.
 *
 * @return whether the payload will be included in write responses
 */
boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/**
 * Controls whether Create/Update/Delete responses for CosmosItem carry the full
 * payload or only headers and status code.
 * <br/>
 * When false (the default), the service omits the payload from the response, which
 * saves network bandwidth and client-side CPU spent on serialization. This feature
 * does not impact RU usage for read or write operations.
 *
 * @param contentResponseOnWriteEnabled whether the payload should be included in write responses
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
    this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
    return this;
}
/**
 * Switches the client to GATEWAY mode using the default gateway connection configuration.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode() {
    return gatewayMode(GatewayConnectionConfig.getDefaultConfig());
}
/**
 * Switches the client to GATEWAY mode using the given gateway connection configuration.
 *
 * @param gatewayConnectionConfig gateway connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
 * Switches the client to DIRECT mode using the default direct connection configuration.
 * <br/>
 * By default, the builder is initialized with directMode().
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode() {
    return directMode(DirectConnectionConfig.getDefaultConfig());
}
/**
 * Switches the client to DIRECT mode using the given direct connection configuration.
 * <br/>
 * By default, the builder is initialized with directMode().
 *
 * @param directConnectionConfig direct connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
    this.directConnectionConfig = directConnectionConfig;
    return this;
}
/**
 * Switches the client to DIRECT mode while also configuring the gateway client used
 * for metadata operations.
 * <br/>
 * Even in direct connection mode, some metadata operations go through the gateway
 * client. Setting the gateway connection config here does not change the connection
 * mode, which remains Direct.
 *
 * @param directConnectionConfig direct connection configuration to be used
 * @param gatewayConnectionConfig gateway connection configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
    this.directConnectionConfig = directConnectionConfig;
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
 * Sets the value appended to the user-agent header; used for monitoring purposes.
 *
 * @param userAgentSuffix the suffix appended to the user-agent header
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
    this.userAgentSuffix = userAgentSuffix;
    return this;
}
/**
 * Sets the throttling retry options for the client.
 * <p>
 * Properties in the RetryOptions class let the application customize the built-in
 * retry policies. This property is optional; when unset, the SDK uses its default
 * retry-policy configuration. See the RetryOptions class for details.
 *
 * @param throttlingRetryOptions the RetryOptions instance.
 * @return current CosmosClientBuilder
 * @throws IllegalArgumentException thrown if an error occurs
 */
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
    this.throttlingRetryOptions = throttlingRetryOptions;
    return this;
}
/**
 * Sets the preferred regions for geo-replicated database accounts, e.g. "East US".
 * <p>
 * When endpoint discovery is enabled and this list is non-empty, the SDK prefers
 * the listed regions, in order, when performing operations. If endpoint discovery
 * is disabled, this property is ignored.
 *
 * @param preferredRegions the list of preferred regions.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
    this.preferredRegions = preferredRegions;
    return this;
}
/**
 * Enables or disables endpoint discovery for geo-replicated database accounts.
 * <p>
 * When enabled, the SDK automatically discovers the current write and read regions
 * so requests are routed to the correct region based on region capability and the
 * user's preference. Default: {@code true}.
 *
 * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
    this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
    return this;
}
/**
 * Enables or disables writes on any writable region of a geo-replicated database
 * account in the Azure Cosmos DB service.
 * <p>
 * When true, the SDK directs write operations to available writable regions of the
 * account, ordered by the PreferredRegions property. This takes effect only when
 * EnableMultipleWriteRegions is also set to true on the DatabaseAccount.
 * Default: {@code true}.
 *
 * @param multipleWriteRegionsEnabled flag to enable writes on any regions for
 * geo-replicated database accounts.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
    this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
    return this;
}
/**
 * Enables or disables client telemetry, which periodically collects aggregated
 * database-operation statistics and system information (cpu/memory) and sends them
 * to the Cosmos monitoring service to help with debugging.
 * <p>
 * Default: {@code false} - this is an opt-in feature.
 *
 * @param clientTelemetryEnabled flag to enable client telemetry.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
    // Stored as an override so it can take precedence over the telemetry config.
    this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
    return this;
}
/**
 * Controls whether reads may fall back to multiple regions configured on the account.
 * <p>
 * Default: {@code true}. When unset, the effective default is true for all
 * consistency levels other than Bounded Staleness (false for Bounded Staleness),
 * provided the account has more than one region.
 *
 * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions
 * configured on an account of Azure Cosmos DB service.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
    this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
    return this;
}
/**
 * Enables automatic retries for write operations even when the SDK cannot guarantee
 * they are idempotent. This sets the client-wide default; it can be overridden per
 * operation in the request options. The setting applies to Create, Replace, Upsert
 * and Delete; for Patch, retries are always disabled by default and can only be
 * enabled via request options.
 * <br/>
 * Caveats when retries are enabled:
 * <br/>
 * - Create: retries can surface extra 409-Conflict responses when a retry attempts
 * to create a document the initial attempt already created; enabling
 * useTrackingIdPropertyForCreateAndReplace avoids retry-caused 409s.
 * <br/>
 * - Replace: retries with etag preconditions can surface extra 412-Precondition
 * failures when the initial attempt already updated the document (changing the
 * etag); useTrackingIdPropertyForCreateAndReplace avoids retry-caused 412s.
 * <br/>
 * - Delete: retries can surface extra 404-NotFound responses when the initial
 * attempt succeeded; enable write retries only if 404s are handled gracefully.
 * <br/>
 * - Upsert: a retry can report 200 (updated) where the initial attempt actually
 * created the document; only relevant to apps that special-case 201 vs 200.
 * <br/>
 * - Patch: idempotency depends entirely on the patch operations and precondition
 * filters in use, so client-wide configuration is ignored for patch.
 * <br/>
 * - Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: no automatic
 * retries are supported.
 *
 * @param options the options controlling whether non-idempotent write operations
 * should be retried and whether trackingIds can be used.
 * @return the CosmosItemRequestOptions
 */
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
    checkNotNull(options, "Argument 'options' must not be null.");
    if (!options.isEnabled()) {
        this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
    } else {
        this.writeRetryPolicy = options.isTrackingIdUsed()
            ? WriteRetryPolicy.WITH_TRACKING_ID
            : WriteRetryPolicy.WITH_RETRIES;
    }
    return this;
}
// Returns the effective client-wide write retry policy.
WriteRetryPolicy getNonIdempotentWriteRetryPolicy() {
    return writeRetryPolicy;
}
// Re-derives the write retry policy from configuration. Unknown or missing
// configuration values (including "NO_RETRIES") resolve to DISABLED.
void resetNonIdempotentWriteRetryPolicy() {
    WriteRetryPolicy policy = WriteRetryPolicy.DISABLED;
    String configuredName = Configs.getNonIdempotentWriteRetryPolicy();
    if ("WITH_TRACKING_ID".equalsIgnoreCase(configuredName)) {
        policy = WriteRetryPolicy.WITH_TRACKING_ID;
    } else if ("WITH_RETRIES".equalsIgnoreCase(configuredName)) {
        policy = WriteRetryPolicy.WITH_RETRIES;
    }
    this.writeRetryPolicy = policy;
}
// Re-reads the configured session capturing type. Only a non-empty configured
// value updates the flag; REGION_SCOPED enables region-scoped session capturing,
// any other non-empty value disables it.
void resetSessionCapturingType() {
    String sessionCapturingType = Configs.getSessionCapturingType();
    if (StringUtils.isEmpty(sessionCapturingType)) {
        return;
    }
    if (sessionCapturingType.equalsIgnoreCase("REGION_SCOPED")) {
        logger.info("Session capturing type is set to REGION_SCOPED");
        this.isRegionScopedSessionCapturingEnabled = true;
    } else {
        logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
        this.isRegionScopedSessionCapturingEnabled = false;
    }
}
/**
 * Sets the {@link CosmosContainerProactiveInitConfig} which enables warming up of
 * caches and connections for the configured containers, using connections obtained
 * from the first <em>k</em> preferred regions (where <em>k</em> is the configured
 * proactive connection region count).
 *
 * <p>
 * Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate
 * {@link CosmosContainerProactiveInitConfig} instances.
 * </p>
 * @param proactiveContainerInitConfig encapsulates the container identities and the
 * number of proactive connection regions
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    this.proactiveContainerInitConfig = proactiveContainerInitConfig;
    return this;
}
/**
 * Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client.
 *
 * @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig) {
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    return this;
}
/**
 * Sets the {@link SessionRetryOptions} instance on the client.
 * <p>
 * This setting optimizes retry behavior for
 * {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} ({@code 404 / 1002}) scenarios,
 * which occur under <i>Session Consistency</i> when a request reaches a region
 * that does not yet have recent enough data.
 * <p>
 * DISCLAIMER: Setting {@link SessionRetryOptions} modifies retry behavior for all
 * operations and workloads executed through this client instance.
 * <p>
 * For multi-write accounts:
 * <ul>
 * <li>
 * A read against a local read region can gain availability by retrying on a
 * different write region, which may hold more up-to-date data.
 * </li>
 * <li>
 * A read against a local write region can benefit from switching to a different
 * write region right away when the local write region lacks the latest data.
 * </li>
 * <li>
 * A write against a local write region can likewise benefit from switching to a
 * different write region right away when the local write region lacks the latest data.
 * </li>
 * </ul>
 * For single-write accounts:
 * <ul>
 * <li>
 * A read against a local read region benefits from switching to the write region quicker.
 * </li>
 * <li>
 * A read against the write region is unaffected - the single write region always
 * has the most up-to-date data.
 * </li>
 * <li>
 * A write to the write region never sees {@code READ_SESSION_NOT_AVAILABLE}: all
 * writes go to the primary replica of that region, so replication lag does not apply.
 * </li>
 * </ul>
 * About region switch hints:
 * <ul>
 * <li>To prioritize the local region for retries, use the local-region-preferred hint of {@link CosmosRegionSwitchHint}.</li>
 * <li>To move retries to a different / remote region quicker, use the remote-region-preferred hint of {@link CosmosRegionSwitchHint}.</li>
 * </ul>
 * Operations supported:
 * <ul>
 * <li>Read</li>
 * <li>Query</li>
 * <li>Create</li>
 * <li>Replace</li>
 * <li>Upsert</li>
 * <li>Delete</li>
 * <li>Patch</li>
 * <li>Batch</li>
 * <li>Bulk</li>
 * </ul>
 *
 * @param sessionRetryOptions The {@link SessionRetryOptions} instance.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
    this.sessionRetryOptions = sessionRetryOptions;
    return this;
}
/**
 * Sets a {@link Supplier} that produces the {@link CosmosExcludedRegions} instance
 * consulted at request time. Requests will not be routed to the excluded regions,
 * for both hedging and retry scenarios, for the workload executed through this
 * instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
 * @return current CosmosClientBuilder.
 * */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
    this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
    return this;
}
/**
 * Gets the regions to exclude from the list of preferred regions. A request will
 * not be routed to these excluded regions for non-retry and retry scenarios for
 * the workload executed through this instance of {@link CosmosClient} /
 * {@link CosmosAsyncClient}.
 *
 * @return the set of regions to exclude; an empty set when no supplier is
 * configured or the supplier yields {@code null}.
 * */
Set<String> getExcludedRegions() {
    // Invoke the supplier exactly once: calling get() twice (null-check then use)
    // can race with suppliers that return different instances per call, and would
    // NPE if the second call returned null.
    if (this.cosmosExcludedRegionsSupplier != null) {
        CosmosExcludedRegions excludedRegions = this.cosmosExcludedRegionsSupplier.get();
        if (excludedRegions != null) {
            return excludedRegions.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
// Returns the configured session retry options, if any.
SessionRetryOptions getSessionRetryOptions() {
    return sessionRetryOptions;
}
/**
 * Returns the configured {@link CosmosEndToEndOperationLatencyPolicyConfig}, if any.
 *
 * @return the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 */
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
    return cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Returns the GATEWAY connection configuration to be used.
 *
 * @return gateway connection config
 */
GatewayConnectionConfig getGatewayConnectionConfig() {
    return this.gatewayConnectionConfig;
}
/**
 * Returns the DIRECT connection configuration to be used.
 *
 * @return direct connection config
 */
DirectConnectionConfig getDirectConnectionConfig() {
    return this.directConnectionConfig;
}
/**
 * Returns the configured user-agent suffix.
 *
 * @return the value of user-agent suffix.
 */
String getUserAgentSuffix() {
    return this.userAgentSuffix;
}
/**
 * Returns the throttling retry options associated with this builder.
 *
 * @return the RetryOptions instance.
 */
ThrottlingRetryOptions getThrottlingRetryOptions() {
    return this.throttlingRetryOptions;
}
/**
 * Returns the preferred regions for geo-replicated database accounts; never null.
 *
 * @return the list of preferred regions, or an empty list when none are configured.
 */
List<String> getPreferredRegions() {
    if (preferredRegions == null) {
        return Collections.emptyList();
    }
    return preferredRegions;
}
/**
 * Indicates whether endpoint discovery is enabled for geo-replicated database accounts.
 *
 * @return whether endpoint discovery is enabled.
 */
boolean isEndpointDiscoveryEnabled() {
    return this.endpointDiscoveryEnabled;
}
/**
 * Indicates whether writes may target any writable region of a geo-replicated
 * database account in the Azure Cosmos DB service.
 * <p>
 * When true, the SDK directs write operations to available writable regions of the
 * account, ordered by the PreferredRegions property. This takes effect only when
 * EnableMultipleWriteRegions is also set to true on the DatabaseAccount.
 * Default: {@code true}.
 *
 * @return flag to enable writes on any regions for geo-replicated database accounts.
 */
boolean isMultipleWriteRegionsEnabled() {
    return this.multipleWriteRegionsEnabled;
}
/**
 * Resolves whether client telemetry is enabled. Precedence: the explicit builder
 * override, then the value from the telemetry config, then the SDK default.
 *
 * @return flag to enable client telemetry.
 */
boolean isClientTelemetryEnabled() {
    if (this.clientTelemetryEnabledOverride != null) {
        return this.clientTelemetryEnabledOverride;
    }
    Boolean explicitlySetInConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
    if (explicitlySetInConfig != null) {
        return explicitlySetInConfig;
    }
    return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
/**
 * Indicates whether reads may fall back to multiple regions configured on the account.
 * <p>
 * Default: {@code true}. When unset, the effective default is true for all
 * consistency levels other than Bounded Staleness (false for Bounded Staleness),
 * provided the account has more than one region.
 *
 * @return flag to allow for reads to go to multiple regions configured on an
 * account of Azure Cosmos DB service.
 */
boolean isReadRequestsFallbackEnabled() {
    return this.readRequestsFallbackEnabled;
}
/**
 * Returns the client telemetry config instance for this builder.
 *
 * @return the client telemetry config instance for this builder
 */
CosmosClientTelemetryConfig getClientTelemetryConfig() {
    return clientTelemetryConfig;
}
/**
 * Sets the client telemetry configuration for this builder. When the supplied
 * config carries an explicit send-to-service value, any previously set
 * builder-level telemetry override is cleared so the config value wins.
 *
 * @param telemetryConfig the client telemetry configuration to be used; must not be null
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
    ifThrowIllegalArgException(telemetryConfig == null,
        "Parameter 'telemetryConfig' must not be null.");
    Boolean explicitValueFromConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(telemetryConfig);
    if (explicitValueFromConfig != null) {
        // The config's explicit value supersedes the builder-level override.
        this.clientTelemetryEnabledOverride = null;
    }
    this.clientTelemetryConfig = telemetryConfig;
    return this;
}
/**
 * Sets a custom serializer used for conversion between POJOs and the Json payload
 * stored in the Cosmos DB service. A serializer specified in request options takes
 * precedence over the one configured here.
 *
 * @param customItemSerializer the custom serializer to be used for item payload transformations
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
    this.defaultCustomSerializer = customItemSerializer;
    return this;
}
// Returns the builder-level custom item serializer, if any.
CosmosItemSerializer getCustomItemSerializer() {
    return defaultCustomSerializer;
}
/**
 * Builds a cosmos async client with the provided properties and logs startup info.
 *
 * @return CosmosAsyncClient
 */
public CosmosAsyncClient buildAsyncClient() {
    // Public overload always emits the startup-configuration log line.
    return buildAsyncClient(true);
}
/**
 * Builds a cosmos async client with the provided properties.
 *
 * @param logStartupInfo whether to emit the startup-configuration log line once the client is built
 * @return CosmosAsyncClient
 */
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Refresh configuration-derived state before validating and building.
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        // Proactively warm caches/connections for the configured containers,
        // bracketed by started/completed recordings for diagnostics.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            // A warm-up duration bounds the aggressive portion of the warm-up.
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // No proactive init configured: still record completion, with an empty list.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
/**
 * Builds a cosmos sync client with the provided properties.
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Refresh configuration-derived state before validating and building.
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        // Proactively warm caches/connections for the configured containers,
        // bracketed by started/completed recordings for diagnostics.
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    }
    // NOTE(review): unlike buildAsyncClient(boolean), there is no else branch
    // recording completion with an empty list when proactive init is absent, and
    // the startup log is unconditional - confirm whether this asymmetry is intended.
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
// Materializes the ConnectionPolicy from the configured connection configs and
// copies the builder-level routing/retry settings onto it.
ConnectionPolicy buildConnectionPolicy() {
    if (this.directConnectionConfig != null) {
        // Direct mode still needs a gateway config for metadata operations.
        if (this.gatewayConnectionConfig == null) {
            this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        }
        this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
    } else if (gatewayConnectionConfig != null) {
        this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
    }
    // NOTE(review): if both configs were null this would dereference the existing
    // connectionPolicy field - presumably directConnectionConfig defaults to
    // non-null on the builder; confirm against the field initializers.
    this.connectionPolicy.setPreferredRegions(this.preferredRegions);
    this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
    this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
    this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
    this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
    this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
    this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    return this.connectionPolicy;
}
// Validates the builder configuration before a client is constructed, throwing
// IllegalArgumentException for any invalid combination.
private void validateConfig() {
    // Check the endpoint for null first: new URI(null) throws NullPointerException,
    // masking the intended IllegalArgumentException from this guard.
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Every preferred region must be non-blank and resolvable to a regional endpoint.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            // Multi-region warm-up requires endpoint discovery to resolve regions.
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // Exactly one credential form must have been configured.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
// Returns the Configs instance used by this builder.
Configs configs() {
    return this.configs;
}
/**
 * Sets the Configs instance used by this builder (internal/test hook).
 *
 * @param configs the Configs instance to use
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder configs(Configs configs) {
    this.configs = configs;
    return this;
}
    /**
     * Throws an {@link IllegalArgumentException} with the given message when the condition holds.
     *
     * @param value the condition; {@code true} triggers the exception
     * @param error the exception message
     */
    private void ifThrowIllegalArgException(boolean value, String error) {
        if (value) {
            throw new IllegalArgumentException(error);
        }
    }
    /**
     * Stops the startup stopwatch and logs a one-line summary of the effective client
     * configuration (endpoint, regions, connection policy, telemetry, tracing, native
     * transport availability, etc.).
     *
     * @param stopwatch the stopwatch started when client construction began; stopped here
     * @param client the fully constructed async client whose configuration is summarized
     */
    private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
        stopwatch.stop();
        // Logged at WARN on purpose so the startup summary survives typical production log levels.
        if (logger.isWarnEnabled()) {
            long time = stopwatch.getTime();
            String diagnosticsCfg = "";
            String tracingCfg = "";
            if (client.getClientTelemetryConfig() != null) {
                diagnosticsCfg = client.getClientTelemetryConfig().toString();
            }
            DiagnosticsProvider provider = client.getDiagnosticsProvider();
            if (provider != null) {
                tracingCfg = provider.getTraceConfigLog();
            }
            logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
                    "configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
                    "consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
                    "connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
                    "diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
                client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
                getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
                isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
                isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
                tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
                io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
        }
    }
    /**
     * Registers the package-internal accessor bridge so that SDK-internal code can read and
     * mutate package-private state of {@code CosmosClientBuilder} without widening the
     * public API surface. Invoked once from the static initializer.
     */
    static void initialize() {
        CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
            new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
                @Override
                public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
                                                                  CosmosClientMetadataCachesSnapshot metadataCache) {
                    builder.metadataCaches(metadataCache);
                }
                @Override
                public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
                    return builder.metadataCaches();
                }
                @Override
                public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
                    builder.setApiType(apiType);
                }
                @Override
                public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
                    return builder.apiType();
                }
                @Override
                public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
                    return builder.getConnectionPolicy();
                }
                @Override
                public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
                    return builder.buildConnectionPolicy();
                }
                @Override
                public Configs getConfigs(CosmosClientBuilder builder) {
                    return builder.configs();
                }
                @Override
                public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
                    return builder.getConsistencyLevel();
                }
                @Override
                public String getEndpoint(CosmosClientBuilder builder) {
                    return builder.getEndpoint();
                }
                @Override
                public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
                    return builder.getCustomItemSerializer();
                }
                @Override
                public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
                    builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
                }
                @Override
                public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
                    return builder.isRegionScopedSessionCapturingEnabled();
                }
            });
    }
static { initialize(); }
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
private Configs configs = new Configs();
private String serviceEndpoint;
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
private List<String> preferredRegions;
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
private Boolean clientTelemetryEnabledOverride = null;
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
* Instantiates a new Cosmos client builder.
*/
    public CosmosClientBuilder() {
        // Default to direct connectivity with the SDK's default direct-mode settings.
        this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        this.userAgentSuffix = "";
        this.throttlingRetryOptions = new ThrottlingRetryOptions();
        this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
        // Seed the write retry policy from system configuration (falls back to DISABLED).
        this.resetNonIdempotentWriteRetryPolicy();
        this.requestPolicies = new LinkedList<>();
    }
    /**
     * Sets a pre-warmed metadata caches snapshot used to bootstrap the new client.
     *
     * @param metadataCachesSnapshot the snapshot to seed the client with (may be null)
     * @return current cosmosClientBuilder
     */
    CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
        this.state = metadataCachesSnapshot;
        return this;
    }
    /**
     * Gets the metadata caches snapshot configured on this builder, or null if none.
     *
     * @return the snapshot
     */
    CosmosClientMetadataCachesSnapshot metadataCaches() {
        return this.state;
    }
    /**
     * Sets a {@code boolean} flag to reduce the frequency of retries when the client
     * strives to meet Session Consistency guarantees for operations
     * that can be scoped to a single logical partition. Read your writes for a given logical partition
     * should see higher stickiness to regions where the logical partition was written to prior or saw requests in
     * thus reducing unnecessary cross-region retries. Reduction of retries would reduce CPU utilization spikes on VMs
     * where the client is deployed along with latency savings through reduction of cross-region calls.
     *
     * <p>
     * DISCLAIMER: Setting the {@link CosmosClientBuilder#regionScopedSessionCapturingEnabled(boolean)} flag to {@code true}
     * will impact all operations executed through this instance of the client provided that
     * both the operation and the account support multi-region writes.
     * </p>
     * <p>
     * Setting {@link CosmosClientBuilder#regionScopedSessionCapturingEnabled(boolean)} to {@code true},
     * ensure to maintain a singleton instance of {@link CosmosClient} or {@link CosmosAsyncClient}.
     * </p>
     *
     * Operations supported:
     * <ul>
     *     <li>Read</li>
     *     <li>Create</li>
     *     <li>Upsert</li>
     *     <li>Delete</li>
     *     <li>Replace</li>
     *     <li>Batch</li>
     *     <li>Patch</li>
     *     <li>Query when scoped to a single logical partition by specifying {@code PartitionKey} with {@link com.azure.cosmos.models.CosmosQueryRequestOptions}</li>
     *     <li>Change feed when scoped to a single logical partition by using {@code FeedRange.forLogicalPartition()} with {@link com.azure.cosmos.models.CosmosChangeFeedRequestOptions}</li>
     * </ul>
     *
     * <p>
     * NOTE: Bulk operations are not supported.
     * </p>
     *
     * @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
     * @return current {@link CosmosClientBuilder}
     * */
    CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
        this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
        return this;
    }
    /**
     * Gets the {@code boolean} flag configured through
     * {@link CosmosClientBuilder#regionScopedSessionCapturingEnabled(boolean)}.
     *
     * @return isRegionScopedSessionCapturingEnabled A {@code boolean} flag
     * */
    boolean isRegionScopedSessionCapturingEnabled() {
        return this.isRegionScopedSessionCapturingEnabled;
    }
    /**
     * Sets an apiType for the builder.
     * @param apiType the API surface (e.g. SQL, Table) this client targets
     * @return current cosmosClientBuilder
     */
    CosmosClientBuilder setApiType(ApiType apiType){
        this.apiType = apiType;
        return this;
    }
/**
* Adds a policy for modifying request options dynamically. The last policy defined aimed towards
* the same operation type will be the one ultimately applied.
*
* @param policy the policy to add
* @return current cosmosClientBuilder
*/
    public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
        checkNotNull(policy, "Argument 'policy' must not be null.");
        // Policies are kept in registration order; per the javadoc, the last policy
        // targeting a given operation type wins.
        this.requestPolicies.add(policy);
        return this;
    }
    /**
     * Returns apiType for the Builder.
     * @return the configured {@code ApiType}, or null when none was set
     */
    ApiType apiType(){ return this.apiType; }
    /**
     * Session capturing is enabled by default for {@link ConsistencyLevel#SESSION}.
     * For other consistency levels, it is not needed, unless if you need occasionally send requests with Session
     * Consistency while the client is not configured in session.
     * <p>
     * enabling Session capturing for Session mode has no effect.
     * @param sessionCapturingOverrideEnabled session capturing override
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        return this;
    }
/**
* Indicates if Session capturing is enabled for non Session modes.
* The default is false.
*
* @return the session capturing override
*/
    boolean isSessionCapturingOverrideEnabled() {
        // Simple accessor for the override flag; defaults to false.
        return this.sessionCapturingOverrideEnabled;
    }
/**
* Enables connections sharing across multiple Cosmos Clients. The default is false.
* <br/>
* <br/>
* <pre>
* {@code
* CosmosAsyncClient client1 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint1)
* .key(key1)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
* CosmosAsyncClient client2 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint2)
* .key(key2)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
*
* }
* </pre>
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout
* config) of the first instantiated client will be used for all other client instances.
* <br/>
* @param connectionSharingAcrossClientsEnabled connection sharing
* @return current cosmosClientBuilder
*/
    public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
        // Fluent setter; see the javadoc above for the cross-client sharing caveats.
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        return this;
    }
/**
* Indicates whether connection sharing is enabled. The default is false.
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* @return the connection sharing across multiple clients
*/
    boolean isConnectionSharingAcrossClientsEnabled() {
        // Defaults to false.
        return this.connectionSharingAcrossClientsEnabled;
    }
/**
* Gets the token resolver
* <br/>
* @return the token resolver
*/
    CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
        // May be null when another auth mechanism (key, token credential, permissions) is in use.
        return cosmosAuthorizationTokenResolver;
    }
/**
* Sets the token resolver
*
* @param cosmosAuthorizationTokenResolver the token resolver
* @return current cosmosClientBuilder
*/
    public CosmosClientBuilder authorizationTokenResolver(
        CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
        this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
            "'cosmosAuthorizationTokenResolver' cannot be null.");
        // Auth mechanisms are mutually exclusive: clear every other configured credential form.
        this.keyOrResourceToken = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }
/**
* Gets the Azure Cosmos DB endpoint the SDK will connect to
*
* @return the endpoint
*/
    String getEndpoint() {
        // Raw endpoint string as supplied via endpoint(String); validated in validateConfig().
        return serviceEndpoint;
    }
/**
* Sets the Azure Cosmos DB endpoint the SDK will connect to
*
* @param endpoint the service endpoint
* @return current Builder
*/
    @Override
    public CosmosClientBuilder endpoint(String endpoint) {
        // Null-hostile fluent setter; URI syntax is validated later in validateConfig().
        this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
        return this;
    }
/**
* Gets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @return the key
*/
    String getKey() {
        // Shared storage with the resource token; non-null only when key/resource-token auth is active.
        return keyOrResourceToken;
    }
/**
* Sets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @param key master or readonly key
* @return current Builder.
*/
    public CosmosClientBuilder key(String key) {
        this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
        // Auth mechanisms are mutually exclusive: clear every other configured credential form.
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }
/**
* Gets a resource token used to perform authentication
* for accessing resource.
*
* @return the resourceToken
*/
    String getResourceToken() {
        // Shared storage with the master/readonly key; see key()/resourceToken().
        return keyOrResourceToken;
    }
/**
* Sets a resource token used to perform authentication
* for accessing resource.
*
* @param resourceToken resourceToken for authentication
* @return current Builder.
*/
    public CosmosClientBuilder resourceToken(String resourceToken) {
        this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
        // Auth mechanisms are mutually exclusive: clear every other configured credential form.
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }
/**
* Gets a token credential instance used to perform authentication
* for accessing resource.
*
* @return the token credential.
*/
    TokenCredential getTokenCredential() {
        // Non-null only when AAD token-credential auth is active.
        return tokenCredential;
    }
    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
     * <a href="https://aka.ms/azsdk/java/identity">identity and authentication</a>
     * documentation for more details on proper usage of the {@link TokenCredential} type.
     *
     * @param credential {@link TokenCredential} used to authorize requests sent to the service.
     * @return the updated CosmosClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public CosmosClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Auth mechanisms are mutually exclusive: clear every other configured credential form.
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        return this;
    }
/**
* Gets the permission list, which contains the
* resource tokens needed to access resources.
*
* @return the permission list
*/
    List<CosmosPermissionProperties> getPermissions() {
        // Non-null only when resource-token-permission auth is active.
        return permissions;
    }
/**
* Sets the permission list, which contains the
* resource tokens needed to access resources.
*
* @param permissions Permission list for authentication.
* @return current Builder.
*/
    public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
        this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
        // Auth mechanisms are mutually exclusive: clear every other configured credential form.
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.tokenCredential = null;
        return this;
    }
    /**
     * Gets the {@link ConsistencyLevel} to be used
     * <br/>
     * By default, {@link ConsistencyLevel#SESSION} consistency is used.
     * <br/>
     * @return the consistency level
     */
    ConsistencyLevel getConsistencyLevel() {
        return this.desiredConsistencyLevel;
    }
    /**
     * Sets the {@link ConsistencyLevel} to be used
     * <br/>
     * By default, {@link ConsistencyLevel#SESSION} consistency is used.
     *
     * @param desiredConsistencyLevel {@link ConsistencyLevel}
     * @return current Builder
     */
    public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
        this.desiredConsistencyLevel = desiredConsistencyLevel;
        return this;
    }
    /**
     * Gets the {@link ConnectionPolicy} to be used
     *
     * @return the connection policy
     */
    ConnectionPolicy getConnectionPolicy() {
        return connectionPolicy;
    }
/**
* Gets the {@link AzureKeyCredential} to be used
*
* @return {@link AzureKeyCredential}
*/
    AzureKeyCredential getCredential() {
        // Non-null only when AzureKeyCredential auth is active.
        return credential;
    }
/**
* Gets the {@link CosmosContainerProactiveInitConfig} to be used
*
* @return {@link CosmosContainerProactiveInitConfig}
* */
    CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
        // Set via openConnectionsAndInitCaches(...); null when warm-up is not requested.
        return proactiveContainerInitConfig;
    }
/**
* Sets the {@link AzureKeyCredential} to be used
*
* @param credential {@link AzureKeyCredential}
* @return current cosmosClientBuilder
*/
    @Override
    public CosmosClientBuilder credential(AzureKeyCredential credential) {
        this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
        // Auth mechanisms are mutually exclusive: clear every other configured credential form.
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }
/**
* Gets the boolean which indicates whether to only return the headers and status code in Cosmos DB response
* in case of Create, Update and Delete operations on CosmosItem.
* <br/>
* If set to false (which is by default), service doesn't return payload in the response. It reduces networking
* and CPU load by not sending the payload back over the network and serializing it
* on the client.
* <br/>
* By-default, this is false.
*
* @return a boolean indicating whether payload will be included in the response or not
*/
    boolean isContentResponseOnWriteEnabled() {
        // Defaults to false: write responses omit the payload unless explicitly enabled.
        return contentResponseOnWriteEnabled;
    }
/**
* Sets the boolean to only return the headers and status code in Cosmos DB response
* in case of Create, Update and Delete operations on CosmosItem.
* <br/>
* If set to false (which is by default), service doesn't return payload in the response. It reduces networking
* and CPU load by not sending the payload back over the network and serializing it on the client.
* <br/>
* This feature does not impact RU usage for read or write operations.
* <br/>
* By-default, this is false.
*
* @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
* @return current cosmosClientBuilder
*/
    public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
        // Fluent setter; see the javadoc above for the payload/bandwidth trade-off.
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        return this;
    }
/**
* Sets the default GATEWAY connection configuration to be used.
*
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder gatewayMode() {
        // Switch to gateway connectivity using the SDK's default gateway settings.
        this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        return this;
    }
/**
* Sets the GATEWAY connection configuration to be used.
*
* @param gatewayConnectionConfig gateway connection configuration
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
        // Switch to gateway connectivity with caller-supplied settings.
        this.gatewayConnectionConfig = gatewayConnectionConfig;
        return this;
    }
/**
* Sets the default DIRECT connection configuration to be used.
* <br/>
* By default, the builder is initialized with directMode()
*
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder directMode() {
        // Switch to direct connectivity using the SDK's default direct-mode settings.
        this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
        return this;
    }
/**
* Sets the DIRECT connection configuration to be used.
* <br/>
* By default, the builder is initialized with directMode()
*
* @param directConnectionConfig direct connection configuration
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
        // Switch to direct connectivity with caller-supplied settings.
        this.directConnectionConfig = directConnectionConfig;
        return this;
    }
/**
* Sets the DIRECT connection configuration to be used.
* gatewayConnectionConfig - represents basic configuration to be used for gateway client.
* <br/>
* Even in direct connection mode, some of the meta data operations go through gateway client,
* <br/>
* Setting gateway connection config in this API doesn't affect the connection mode,
* which will be Direct in this case.
*
* @param directConnectionConfig direct connection configuration to be used
* @param gatewayConnectionConfig gateway connection configuration to be used
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
        // Direct mode for data plane; the gateway config covers metadata operations only.
        this.directConnectionConfig = directConnectionConfig;
        this.gatewayConnectionConfig = gatewayConnectionConfig;
        return this;
    }
/**
* sets the value of the user-agent suffix.
*
* @param userAgentSuffix The value to be appended to the user-agent header, this is
* used for monitoring purposes.
*
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
        // Appended to the user-agent header for monitoring; defaults to "".
        this.userAgentSuffix = userAgentSuffix;
        return this;
    }
/**
* Sets the retry policy options associated with the DocumentClient instance.
* <p>
* Properties in the RetryOptions class allow application to customize the built-in
* retry policies. This property is optional. When it's not set, the SDK uses the
* default values for configuring the retry policies. See RetryOptions class for
* more details.
*
* @param throttlingRetryOptions the RetryOptions instance.
* @return current CosmosClientBuilder
* @throws IllegalArgumentException thrown if an error occurs
*/
    public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
        // Replaces the default ThrottlingRetryOptions created in the constructor.
        this.throttlingRetryOptions = throttlingRetryOptions;
        return this;
    }
/**
* Sets the preferred regions for geo-replicated database accounts. For example,
* "East US" as the preferred region.
* <p>
* When EnableEndpointDiscovery is true and PreferredRegions is non-empty,
* the SDK will prefer to use the regions in the container in the order
* they are specified to perform operations.
* <p>
* If EnableEndpointDiscovery is set to false, this property is ignored.
*
* @param preferredRegions the list of preferred regions.
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
        // Entries are validated (non-blank, resolvable) later in validateConfig().
        this.preferredRegions = preferredRegions;
        return this;
    }
/**
* Sets the flag to enable endpoint discovery for geo-replicated database accounts.
* <p>
* When EnableEndpointDiscovery is true, the SDK will automatically discover the
* current write and read regions to ensure requests are sent to the correct region
* based on the capability of the region and the user's preference.
* <p>
* The default value for this property is true indicating endpoint discovery is enabled.
*
* @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
        // Defaults to true; see the javadoc above for region-routing implications.
        this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
        return this;
    }
/**
* Sets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
* Cosmos DB service.
* <p>
* When the value of this property is true, the SDK will direct write operations to
* available writable regions of geo-replicated database account. Writable regions
* are ordered by PreferredRegions property. Setting the property value
* to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
* is also set to true.
* <p>
* DEFAULT value is true indicating that writes are directed to
* available writable regions of geo-replicated database account.
*
* @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
* database accounts.
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
        // Defaults to true; only effective when the account itself enables multi-region writes.
        this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
        return this;
    }
/**
* Sets the flag to enable client telemetry which will periodically collect
* database operations aggregation statistics, system information like cpu/memory
* and send it to cosmos monitoring service, which will be helpful during debugging.
*<p>
* DEFAULT value is false indicating this is opt in feature, by default no telemetry collection.
*
* @param clientTelemetryEnabled flag to enable client telemetry.
* @return current CosmosClientBuilder
*/
    public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
        // Stored as an override (Boolean) so "never set" remains distinguishable from false.
        this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
        return this;
    }
    /**
     * Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
     * <p>
     * DEFAULT value is true.
     * <p>
     * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
     * The default is false for Bounded Staleness.
     * 1. {@link CosmosClientBuilder#endpointDiscoveryEnabled(boolean)} is true
     * 2. the Azure Cosmos DB account has more than one region
     *
     * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
     * Azure Cosmos DB service.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
        this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
        return this;
    }
/**
* Enables automatic retries for write operations even when the SDK can't
* guarantee that they are idempotent. This is the default behavior for the entire Cosmos client - the policy can be
* overridden for individual operations in the request options.
* <br/>>
* NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
* Upsert and Delete operations. It can be overridden on per-request base in the request options. For patch
* operations by default (unless overridden in the request options) retries are always disabled by default.
* <br/>
* - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
* to create a document that the initial attempt successfully created. When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
* <br/>
* - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when a
* replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
* initial attempt successfully updated (causing the etag to change). When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
* <br/>
* - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
* initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
* handle 404 - Not Found.
* <br/>
* - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
* document has been created by the initial attempt - so logically within the same operation. This will only
* impact applications who have special casing for 201 vs. 200 for upsert operations.
* <br/>
* Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
* being executed and the precondition filters being used. Before enabling write retries for patch this needs
* to be carefully reviewed and tests - which is wht retries for patch can only be enabled on request options
* - any CosmosClient wide configuration will be ignored.
* <br/>
* Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
* @param options the options controlling whether non-idempotent write operations should be retried and whether
* trackingIds can be used.
* @return the CosmosItemRequestOptions
*/
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
checkNotNull(options, "Argument 'options' must not be null.");
if (options.isEnabled()) {
if (options.isTrackingIdUsed()) {
this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
} else {
this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
}
} else {
this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
}
return this;
}
    /** Gets the effective write retry policy for non-idempotent writes. */
    WriteRetryPolicy getNonIdempotentWriteRetryPolicy()
    {
        return this.writeRetryPolicy;
    }
void resetNonIdempotentWriteRetryPolicy()
{
String writePolicyName = Configs.getNonIdempotentWriteRetryPolicy();
if (writePolicyName != null) {
if (writePolicyName.equalsIgnoreCase("NO_RETRIES")) {
this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
return;
} else if (writePolicyName.equalsIgnoreCase("WITH_TRACKING_ID")) {
this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
return;
} else if (writePolicyName.equalsIgnoreCase("WITH_RETRIES")) {
this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
return;
}
}
this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
}
void resetSessionCapturingType() {
String sessionCapturingType = Configs.getSessionCapturingType();
if (!StringUtils.isEmpty(sessionCapturingType)) {
if (sessionCapturingType.equalsIgnoreCase("REGION_SCOPED")) {
logger.info("Session capturing type is set to REGION_SCOPED");
this.isRegionScopedSessionCapturingEnabled = true;
} else {
logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
this.isRegionScopedSessionCapturingEnabled = false;
}
}
}
    /**
     * Sets the {@link CosmosContainerProactiveInitConfig} which enable warming up of caches and connections
     * associated with containers obtained from
     * {@link CosmosContainerProactiveInitConfig#getCosmosContainerIdentities()} to replicas
     * obtained from the first <em>k</em> preferred regions where <em>k</em> evaluates to
     * {@link CosmosContainerProactiveInitConfig#getProactiveConnectionRegionsCount()}.
     *
     * <p>
     *     Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate {@link CosmosContainerProactiveInitConfig} class
     * </p>
     * @param proactiveContainerInitConfig which encapsulates a list of container identities and no of
     *                                     proactive connection regions
     * @return current CosmosClientBuilder
     * */
    public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
        this.proactiveContainerInitConfig = proactiveContainerInitConfig;
        return this;
    }
/**
* Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client
* @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig){
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
return this;
}
/**
 * Sets the {@link SessionRetryOptions} instance on the client.
 * <p>
 * This setting helps in optimizing retry behavior associated with
 * {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} or {@code 404 / 1002} scenarios which happen
 * when the targeted consistency used by the request is <i>Session Consistency</i> and a
 * request goes to a region that does not have recent enough data which the
 * request is looking for.
 * <p>
 * DISCLAIMER: Setting {@link SessionRetryOptions} will modify retry behavior
 * for all operations or workloads executed through this instance of the client.
 * <p>
 * For multi-write accounts:
 * <ul>
 * <li>
 * For a read request going to a local read region, it is possible to optimize
 * availability by having the request be retried on a different write region since
 * the other write region might have more upto date data.
 * </li>
 * <li>
 * For a read request going to a local write region, it could help to
 * switch to a different write region right away provided the local write region
 * does not have the most up to date data.
 * </li>
 * <li>
 * For a write request going to a local write region, it could help to
 * switch to a different write region right away provided the local write region
 * does not have the most up to date data.
 * </li>
 * </ul>
 * For single-write accounts:
 * <ul>
 * <li>
 * If a read request goes to a local read region, it helps to switch to the write region quicker.
 * </li>
 * <li>
 * If a read request goes to a write region, the {@link SessionRetryOptions} setting does not
 * matter since the write region in a single-write account has the most up to date data.
 * </li>
 * <li>
 * For a write to a write region in a single-write account, {@code READ_SESSION_NOT_AVAILABLE} errors
 * do not apply since the write-region always has the most recent version of the data
 * and all writes go to the primary replica in this region. Therefore, replication lags causing errors
 * is not applicable here.
 * </li>
 * </ul>
 * About region switch hints:
 * <ul>
 * <li>In order to prioritize the local region for retries, use the hint {@link CosmosRegionSwitchHint#LOCAL_REGION_PREFERRED}.</li>
 * <li>In order to move retries to a different / remote region quicker, use the hint {@link CosmosRegionSwitchHint#REMOTE_REGION_PREFERRED}.</li>
 * </ul>
 * Operations supported:
 * <ul>
 * <li>Read</li>
 * <li>Query</li>
 * <li>Create</li>
 * <li>Replace</li>
 * <li>Upsert</li>
 * <li>Delete</li>
 * <li>Patch</li>
 * <li>Batch</li>
 * <li>Bulk</li>
 * </ul>
 *
 * @param sessionRetryOptions The {@link SessionRetryOptions} instance.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
    this.sessionRetryOptions = sessionRetryOptions;
    return this;
}
/**
 * Sets a {@link Supplier} of {@link CosmosExcludedRegions} which returns a {@link CosmosExcludedRegions}
 * instance when {@link Supplier#get()} is invoked.
 * The request will not be routed to regions present in {@link CosmosExcludedRegions#getExcludedRegions()}
 * for hedging scenarios and retry scenarios for the workload executed through this instance
 * of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
 * @return current CosmosClientBuilder.
 * */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
    // Stored as-is and evaluated lazily via getExcludedRegions().
    this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
    return this;
}
/**
 * Gets the regions to exclude from the list of preferred regions. A request will not be
 * routed to these excluded regions for non-retry and retry scenarios
 * for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @return the set of regions to exclude; an empty mutable set when no supplier is configured
 * or the supplier yields null.
 * */
Set<String> getExcludedRegions() {
    if (this.cosmosExcludedRegionsSupplier != null) {
        // Invoke the supplier exactly once. The previous code called get() twice
        // (null-check and use); a non-idempotent supplier could hand back a different -
        // possibly null - instance on the second call.
        CosmosExcludedRegions excludedRegions = this.cosmosExcludedRegionsSupplier.get();
        if (excludedRegions != null) {
            return excludedRegions.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
SessionRetryOptions getSessionRetryOptions() {
return this.sessionRetryOptions;
}
/**
* Gets the {@link CosmosEndToEndOperationLatencyPolicyConfig}
* @return the {@link CosmosEndToEndOperationLatencyPolicyConfig}
*/
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
* Gets the GATEWAY connection configuration to be used.
*
* @return gateway connection config
*/
GatewayConnectionConfig getGatewayConnectionConfig() {
return gatewayConnectionConfig;
}
/**
* Gets the DIRECT connection configuration to be used.
*
* @return direct connection config
*/
DirectConnectionConfig getDirectConnectionConfig() {
return directConnectionConfig;
}
/**
* Gets the value of user-agent suffix.
*
* @return the value of user-agent suffix.
*/
String getUserAgentSuffix() {
return userAgentSuffix;
}
/**
* Gets the retry policy options associated with the DocumentClient instance.
*
* @return the RetryOptions instance.
*/
ThrottlingRetryOptions getThrottlingRetryOptions() {
return throttlingRetryOptions;
}
/**
* Gets the preferred regions for geo-replicated database accounts
*
* @return the list of preferred region.
*/
List<String> getPreferredRegions() {
return preferredRegions != null ? preferredRegions : Collections.emptyList();
}
/**
* Gets the flag to enable endpoint discovery for geo-replicated database accounts.
*
* @return whether endpoint discovery is enabled.
*/
boolean isEndpointDiscoveryEnabled() {
return endpointDiscoveryEnabled;
}
/**
* Gets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
* Cosmos DB service.
* <p>
* When the value of this property is true, the SDK will direct write operations to
* available writable regions of geo-replicated database account. Writable regions
* are ordered by PreferredRegions property. Setting the property value
* to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
* is also set to true.
* <p>
* DEFAULT value is true indicating that writes are directed to
* available writable regions of geo-replicated database account.
*
* @return flag to enable writes on any regions for geo-replicated database accounts.
*/
boolean isMultipleWriteRegionsEnabled() {
return multipleWriteRegionsEnabled;
}
/**
* Gets the flag to enabled client telemetry.
*
* @return flag to enable client telemetry.
*/
boolean isClientTelemetryEnabled() {
Boolean explicitlySetInConfig = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
if (this.clientTelemetryEnabledOverride != null) {
return this.clientTelemetryEnabledOverride;
}
if (explicitlySetInConfig != null) {
return explicitlySetInConfig;
}
return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
/**
* Gets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
* <p>
* DEFAULT value is true.
* <p>
* If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
* The default is false for Bounded Staleness.
* 1. {@link
* 2. the Azure Cosmos DB account has more than one region
*
* @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
*/
boolean isReadRequestsFallbackEnabled() {
return readRequestsFallbackEnabled;
}
/**
* Returns the client telemetry config instance for this builder
* @return the client telemetry config instance for this builder
*/
CosmosClientTelemetryConfig getClientTelemetryConfig() {
return this.clientTelemetryConfig;
}
/**
* Returns the client telemetry config instance for this builder
* @param telemetryConfig the client telemetry configuration to be used
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
ifThrowIllegalArgException(telemetryConfig == null,
"Parameter 'telemetryConfig' must not be null.");
Boolean explicitValueFromConfig = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(telemetryConfig);
if (explicitValueFromConfig != null) {
this.clientTelemetryEnabledOverride = null;
}
this.clientTelemetryConfig = telemetryConfig;
return this;
}
/**
* Sets a custom serializer that should be used for conversion between POJOs and Json payload stored in the
* Cosmos DB service. The custom serializer can also be specified in request options. If defined here and
* in request options the serializer defined in request options will be used.
* @param customItemSerializer the custom serializer to be used for item payload transformations
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
this.defaultCustomSerializer = customItemSerializer;
return this;
}
CosmosItemSerializer getCustomItemSerializer() {
return this.defaultCustomSerializer;
}
/**
* Builds a cosmos async client with the provided properties
*
* @return CosmosAsyncClient
*/
public CosmosAsyncClient buildAsyncClient() {
return buildAsyncClient(true);
}
/**
* Builds a cosmos async client with the provided properties
*
* @return CosmosAsyncClient
*/
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
StopWatch stopwatch = new StopWatch();
stopwatch.start();
this.resetSessionCapturingType();
validateConfig();
buildConnectionPolicy();
CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
if (proactiveContainerInitConfig != null) {
cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
Duration aggressiveWarmupDuration = proactiveContainerInitConfig
.getAggressiveWarmupDuration();
if (aggressiveWarmupDuration != null) {
cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
} else {
cosmosAsyncClient.openConnectionsAndInitCaches();
}
cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
} else {
cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
}
if (logStartupInfo) {
logStartupInfo(stopwatch, cosmosAsyncClient);
}
return cosmosAsyncClient;
}
/**
 * Builds a cosmos sync client with the provided properties.
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        // Use the time-bounded aggressive warm-up only when a duration was configured.
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // Consistency fix: mirror buildAsyncClient(boolean), which records completion with an
        // empty container list when no proactive init config is present; this branch was missing.
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
// Materializes the ConnectionPolicy from the configured direct/gateway configs and
// copies the builder-level routing/retry settings onto it.
ConnectionPolicy buildConnectionPolicy() {
    if (this.directConnectionConfig != null) {
        // Direct mode still requires a gateway configuration; fall back to the default when unset.
        if (this.gatewayConnectionConfig == null) {
            this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        }
        this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
    } else if (gatewayConnectionConfig != null) {
        this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
    }
    // NOTE(review): if both directConnectionConfig and gatewayConnectionConfig are null, the
    // calls below would NPE unless this.connectionPolicy (or directConnectionConfig) is
    // initialized to a default elsewhere in the builder - confirm that invariant.
    this.connectionPolicy.setPreferredRegions(this.preferredRegions);
    this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
    this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
    this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
    this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
    this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
    this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    return this.connectionPolicy;
}
/**
 * Validates the builder configuration before client construction.
 *
 * @throws IllegalArgumentException when the service endpoint is missing or malformed,
 * a preferred region is empty or unresolvable, the proactive-init config is inconsistent
 * with the preferred regions, or no credential of any kind was provided.
 */
private void validateConfig() {
    // Bug fix: check the endpoint for null BEFORE parsing it. Previously new URI(null)
    // threw a raw NullPointerException, making the descriptive IllegalArgumentException
    // below unreachable.
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Each preferred region must be non-blank and resolvable to a regional endpoint.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // At least one authentication mechanism must be present.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
Configs configs() {
return configs;
}
/**
* Configs
*
* @return current cosmosClientBuilder
*/
CosmosClientBuilder configs(Configs configs) {
this.configs = configs;
return this;
}
private void ifThrowIllegalArgException(boolean value, String error) {
if (value) {
throw new IllegalArgumentException(error);
}
}
private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
stopwatch.stop();
if (logger.isWarnEnabled()) {
long time = stopwatch.getTime();
String diagnosticsCfg = "";
String tracingCfg = "";
if (client.getClientTelemetryConfig() != null) {
diagnosticsCfg = client.getClientTelemetryConfig().toString();
}
DiagnosticsProvider provider = client.getDiagnosticsProvider();
if (provider != null) {
tracingCfg = provider.getTraceConfigLog();
}
logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
"configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
"consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
"connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
"diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
}
}
static void initialize() {
CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
@Override
public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
CosmosClientMetadataCachesSnapshot metadataCache) {
builder.metadataCaches(metadataCache);
}
@Override
public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
return builder.metadataCaches();
}
@Override
public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
builder.setApiType(apiType);
}
@Override
public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
return builder.apiType();
}
@Override
public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
return builder.getConnectionPolicy();
}
@Override
public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
return builder.buildConnectionPolicy();
}
@Override
public Configs getConfigs(CosmosClientBuilder builder) {
return builder.configs();
}
@Override
public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
return builder.getConsistencyLevel();
}
@Override
public String getEndpoint(CosmosClientBuilder builder) {
return builder.getEndpoint();
}
@Override
public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
return builder.getCustomItemSerializer();
}
@Override
public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
}
@Override
public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
return builder.isRegionScopedSessionCapturingEnabled();
}
});
}
static { initialize(); }
} |
// NOTE(review): the following fragment appears to be extraction residue that duplicated
// FunctionAppImpl#deployAsync ("deploy" = "pushDeploy" + "pollDeploymentStatus"):
//   public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
//       return this.pushDeployAsync(type, file, null)
//           .flatMap(result -> kuduClient.pollDeploymentStatus(result, manager().serviceClient().getDefaultPollInterval()));
//   }
// (deployOptions is accepted but not forwarded - null is passed to pushDeployAsync; verify.)
class FunctionAppImpl
extends AppServiceBaseImpl<
FunctionApp, FunctionAppImpl, FunctionApp.DefinitionStages.WithCreate, FunctionApp.Update>
implements FunctionApp,
FunctionApp.Definition,
FunctionApp.DefinitionStages.NewAppServicePlanWithGroup,
FunctionApp.DefinitionStages.ExistingLinuxPlanWithGroup,
FunctionApp.Update {
private static final ClientLogger LOGGER = new ClientLogger(FunctionAppImpl.class);
private static final String SETTING_WEBSITE_CONTENTAZUREFILECONNECTIONSTRING =
"WEBSITE_CONTENTAZUREFILECONNECTIONSTRING";
private static final String SETTING_WEBSITE_CONTENTSHARE = "WEBSITE_CONTENTSHARE";
private static final String SETTING_WEB_JOBS_STORAGE = "AzureWebJobsStorage";
private static final String SETTING_WEB_JOBS_DASHBOARD = "AzureWebJobsDashboard";
private Creatable<StorageAccount> storageAccountCreatable;
private StorageAccount storageAccountToSet;
private StorageAccount currentStorageAccount;
private FunctionService functionService;
private FunctionDeploymentSlots deploymentSlots;
private String functionServiceHost;
private Boolean appServicePlanIsFlexConsumption;
FunctionAppImpl(
final String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
AppServiceManager manager) {
super(name, innerObject, siteConfig, logConfig, manager);
if (!isInCreateMode()) {
initializeFunctionService();
}
}
// Lazily builds the RestProxy-backed client used to talk to the function app's host
// endpoints (key listing, triggering, log streaming) at the app's default hostname.
private void initializeFunctionService() {
    if (functionService == null) {
        UrlBuilder urlBuilder = UrlBuilder.parse(this.defaultHostname());
        String baseUrl;
        // Default to HTTPS when the hostname carries no scheme.
        if (urlBuilder.getScheme() == null) {
            urlBuilder.setScheme("https");
        }
        try {
            baseUrl = urlBuilder.toUrl().toString();
        } catch (MalformedURLException e) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(e));
        }
        // Reuse the manager's pipeline policies, but strip the ARM-specific auth/registration
        // policies: these requests target the function host, which authenticates via the
        // FunctionAuthenticationPolicy added below instead.
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0, count = manager().httpPipeline().getPolicyCount(); i < count; ++i) {
            HttpPipelinePolicy policy = manager().httpPipeline().getPolicy(i);
            if (!(policy instanceof AuthenticationPolicy)
                && !(policy instanceof ProviderRegistrationPolicy)
                && !(policy instanceof AuxiliaryAuthenticationPolicy)) {
                policies.add(policy);
            }
        }
        policies.add(new FunctionAuthenticationPolicy(this));
        HttpPipeline httpPipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(manager().httpPipeline().getHttpClient())
            .build();
        functionServiceHost = baseUrl;
        functionService =
            RestProxy.create(FunctionService.class, httpPipeline,
                SerializerFactory.createDefaultManagementSerializerAdapter());
    }
}
@Override
public void setInner(SiteInner innerObject) {
super.setInner(innerObject);
}
@Override
public FunctionDeploymentSlots deploymentSlots() {
if (deploymentSlots == null) {
deploymentSlots = new FunctionDeploymentSlotsImpl(this);
}
return deploymentSlots;
}
@Override
public FunctionAppImpl withNewConsumptionPlan() {
return withNewAppServicePlan(OperatingSystem.WINDOWS, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewConsumptionPlan(String appServicePlanName) {
return withNewAppServicePlan(
appServicePlanName, OperatingSystem.WINDOWS, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withRuntime(String runtime) {
return withAppSetting(SETTING_FUNCTIONS_WORKER_RUNTIME, runtime);
}
@Override
public FunctionAppImpl withRuntimeVersion(String version) {
return withAppSetting(SETTING_FUNCTIONS_EXTENSION_VERSION, version.startsWith("~") ? version : "~" + version);
}
@Override
public FunctionAppImpl withLatestRuntimeVersion() {
return withRuntimeVersion("latest");
}
@Override
Mono<SiteInner> submitSite(SiteInner site) {
if (isFunctionAppOnACA()) {
return createOrUpdateInner(site);
} else {
return super.submitSite(site);
}
}
@Override
Mono<SiteInner> submitSite(SitePatchResourceInner siteUpdate) {
if (isFunctionAppOnACA()) {
return updateInner(siteUpdate);
} else {
return super.submitSite(siteUpdate);
}
}
@Override
Mono<Indexable> submitAppSettings() {
if (storageAccountCreatable != null && this.taskResult(storageAccountCreatable.key()) != null) {
storageAccountToSet = this.taskResult(storageAccountCreatable.key());
}
if (storageAccountToSet == null) {
return super.submitAppSettings();
} else {
return storageAccountToSet
.getKeysAsync()
.flatMap(storageAccountKeys -> {
StorageAccountKey key = storageAccountKeys.get(0);
String connectionString = ResourceManagerUtils
.getStorageConnectionString(storageAccountToSet.name(), key.value(),
manager().environment());
addAppSettingIfNotModified(SETTING_WEB_JOBS_STORAGE, connectionString);
if (!isFunctionAppOnACA()) {
addAppSettingIfNotModified(SETTING_WEB_JOBS_DASHBOARD, connectionString);
return this.manager().appServicePlans().getByIdAsync(this.appServicePlanId())
.flatMap(appServicePlan -> {
if (appServicePlan == null
|| isConsumptionOrPremiumAppServicePlan(appServicePlan.pricingTier())) {
addAppSettingIfNotModified(
SETTING_WEBSITE_CONTENTAZUREFILECONNECTIONSTRING, connectionString);
addAppSettingIfNotModified(
SETTING_WEBSITE_CONTENTSHARE,
this.manager().resourceManager().internalContext()
.randomResourceName(name(), 32));
}
return FunctionAppImpl.super.submitAppSettings();
});
} else {
return FunctionAppImpl.super.submitAppSettings();
}
}).then(
Mono
.fromCallable(
() -> {
currentStorageAccount = storageAccountToSet;
storageAccountToSet = null;
storageAccountCreatable = null;
return this;
}));
}
}
@Override
public OperatingSystem operatingSystem() {
if (isFunctionAppOnACA()) {
return OperatingSystem.LINUX;
}
return (innerModel().reserved() == null || !innerModel().reserved())
? OperatingSystem.WINDOWS : OperatingSystem.LINUX;
}
private void addAppSettingIfNotModified(String key, String value) {
if (!appSettingModified(key)) {
withAppSetting(key, value);
}
}
private boolean appSettingModified(String key) {
return (appSettingsToAdd != null && appSettingsToAdd.containsKey(key))
|| (appSettingsToRemove != null && appSettingsToRemove.contains(key));
}
// Treats a missing/undescribed pricing tier as consumption-like; otherwise checks whether
// the tier is Dynamic (consumption) or Elastic Premium.
private static boolean isConsumptionOrPremiumAppServicePlan(PricingTier pricingTier) {
    if (pricingTier == null) {
        return true;
    }
    SkuDescription description = pricingTier.toSkuDescription();
    if (description == null) {
        return true;
    }
    String tier = description.tier();
    return SkuName.DYNAMIC.toString().equalsIgnoreCase(tier)
        || SkuName.ELASTIC_PREMIUM.toString().equalsIgnoreCase(tier);
}
@Override
FunctionAppImpl withNewAppServicePlan(OperatingSystem operatingSystem, PricingTier pricingTier) {
return super.withNewAppServicePlan(operatingSystem, pricingTier).autoSetAlwaysOn(pricingTier);
}
@Override
FunctionAppImpl withNewAppServicePlan(
String appServicePlan, OperatingSystem operatingSystem, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlan, operatingSystem, pricingTier).autoSetAlwaysOn(pricingTier);
}
@Override
public FunctionAppImpl withExistingAppServicePlan(AppServicePlan appServicePlan) {
super.withExistingAppServicePlan(appServicePlan);
return autoSetAlwaysOn(appServicePlan.pricingTier());
}
// Enables "always on" only for plan tiers that support it; Free/Shared/Dynamic/Elastic
// tiers get it disabled.
private FunctionAppImpl autoSetAlwaysOn(PricingTier pricingTier) {
    SkuDescription description = pricingTier.toSkuDescription();
    String tier = description.tier();
    boolean alwaysOnUnsupported =
        tier.equalsIgnoreCase(SkuName.FREE.toString())
            || tier.equalsIgnoreCase(SkuName.SHARED.toString())
            || tier.equalsIgnoreCase(SkuName.DYNAMIC.toString())
            || tier.equalsIgnoreCase(SkuName.ELASTIC_PREMIUM.toString())
            || tier.equalsIgnoreCase(SkuName.ELASTIC_ISOLATED.toString());
    return withWebAppAlwaysOn(!alwaysOnUnsupported);
}
@Override
public FunctionAppImpl withNewStorageAccount(String name, StorageAccountSkuType sku) {
StorageAccount.DefinitionStages.WithGroup storageDefine =
manager().storageManager().storageAccounts().define(name).withRegion(regionName());
if (super.creatableGroup != null && isInCreateMode()) {
storageAccountCreatable =
storageDefine
.withNewResourceGroup(super.creatableGroup)
.withGeneralPurposeAccountKindV2()
.withSku(sku);
} else {
storageAccountCreatable =
storageDefine
.withExistingResourceGroup(resourceGroupName())
.withGeneralPurposeAccountKindV2()
.withSku(sku);
}
this.addDependency(storageAccountCreatable);
return this;
}
@Override
public FunctionAppImpl withNewStorageAccount(Creatable<StorageAccount> storageAccount) {
storageAccountCreatable = storageAccount;
this.addDependency(storageAccountCreatable);
return this;
}
@Override
public FunctionAppImpl withExistingStorageAccount(StorageAccount storageAccount) {
this.storageAccountToSet = storageAccount;
return this;
}
@Override
public FunctionAppImpl withDailyUsageQuota(int quota) {
innerModel().withDailyMemoryTimeQuota(quota);
return this;
}
@Override
public FunctionAppImpl withoutDailyUsageQuota() {
return withDailyUsageQuota(0);
}
@Override
public FunctionAppImpl withNewLinuxConsumptionPlan() {
return withNewAppServicePlan(OperatingSystem.LINUX, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewLinuxConsumptionPlan(String appServicePlanName) {
return withNewAppServicePlan(
appServicePlanName, OperatingSystem.LINUX, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.LINUX, pricingTier);
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.LINUX, pricingTier);
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(Creatable<AppServicePlan> appServicePlanCreatable) {
super.withNewAppServicePlan(appServicePlanCreatable);
if (appServicePlanCreatable instanceof AppServicePlan) {
this.autoSetAlwaysOn(((AppServicePlan) appServicePlanCreatable).pricingTier());
}
return this;
}
@Override
public FunctionAppImpl withExistingLinuxAppServicePlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan).autoSetAlwaysOn(appServicePlan.pricingTier());
}
@Override
public FunctionAppImpl withBuiltInImage(final FunctionRuntimeStack runtimeStack) {
ensureLinuxPlan();
cleanUpContainerSettings();
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
withRuntime(runtimeStack.runtime());
withRuntimeVersion(runtimeStack.version());
siteConfig.withLinuxFxVersion(runtimeStack.getLinuxFxVersion());
return this;
}
@Override
public FunctionAppImpl withPublicDockerHubImage(String imageAndTag) {
    // Container images are only supported on Linux plans.
    ensureLinuxPlan();
    return super.withPublicDockerHubImage(imageAndTag);
}
@Override
public FunctionAppImpl withPrivateDockerHubImage(String imageAndTag) {
    ensureLinuxPlan();
    // Bug fix: this previously delegated to super.withPublicDockerHubImage, which skipped
    // the private Docker Hub credential configuration entirely.
    return super.withPrivateDockerHubImage(imageAndTag);
}
@Override
public FunctionAppImpl withPrivateRegistryImage(String imageAndTag, String serverUrl) {
    // Container images are only supported on Linux plans.
    ensureLinuxPlan();
    super.withPrivateRegistryImage(imageAndTag, serverUrl);
    if (isFunctionAppOnACA()) {
        try {
            // For function apps on Azure Container Apps, the registry-server app setting
            // stores authority + path with the scheme stripped.
            URL url = new URL(serverUrl);
            withAppSetting(SETTING_REGISTRY_SERVER, url.getAuthority() + url.getFile());
        } catch (MalformedURLException e) {
            // NOTE(review): a malformed serverUrl is silently ignored here, leaving the
            // registry-server setting unset - confirm this best-effort behavior is intended.
        }
    }
    return this;
}
@Override
protected void cleanUpContainerSettings() {
if (siteConfig != null && siteConfig.linuxFxVersion() != null) {
siteConfig.withLinuxFxVersion(null);
}
if (siteConfig != null && siteConfig.windowsFxVersion() != null) {
siteConfig.withWindowsFxVersion(null);
}
withoutAppSetting(SETTING_DOCKER_IMAGE);
withoutAppSetting(SETTING_REGISTRY_SERVER);
withoutAppSetting(SETTING_REGISTRY_USERNAME);
withoutAppSetting(SETTING_REGISTRY_PASSWORD);
}
@Override
protected OperatingSystem appServicePlanOperatingSystem(AppServicePlan appServicePlan) {
    // ARM marks Linux plans with reserved == true; null/false therefore means Windows.
    return (appServicePlan.innerModel().reserved() == null || !appServicePlan.innerModel().reserved())
        ? OperatingSystem.WINDOWS
        : OperatingSystem.LINUX;
}

/** Returns the storage account associated with this function app (resolved during create/update). */
@Override
public StorageAccount storageAccount() {
    return currentStorageAccount;
}

/** Blocking wrapper over {@link #getMasterKeyAsync()}. */
@Override
public String getMasterKey() {
    return getMasterKeyAsync().block();
}

/** Fetches the function app's master host key via the ARM listHostKeys API. */
@Override
public Mono<String> getMasterKeyAsync() {
    return this.manager().serviceClient().getWebApps().listHostKeysAsync(resourceGroupName(), name())
        .map(HostKeysInner::masterKey);
}

/** Lists the functions deployed in this function app. */
@Override
public PagedIterable<FunctionEnvelope> listFunctions() {
    return this.manager().functionApps().listFunctions(resourceGroupName(), name());
}

/** Blocking wrapper over {@link #listFunctionKeysAsync(String)}. */
@Override
public Map<String, String> listFunctionKeys(String functionName) {
    return listFunctionKeysAsync(functionName).block();
}

/** Lists one function's keys via the functions admin endpoint; an empty map when none. */
@Override
public Mono<Map<String, String>> listFunctionKeysAsync(final String functionName) {
    return functionService
        .listFunctionKeys(functionServiceHost, functionName)
        .map(
            result -> {
                Map<String, String> keys = new HashMap<>();
                if (result.keys != null) {
                    for (NameValuePair pair : result.keys) {
                        keys.put(pair.name(), pair.value());
                    }
                }
                return keys;
            });
}

/** Blocking wrapper over {@link #addFunctionKeyAsync(String, String, String)}. */
@Override
public NameValuePair addFunctionKey(String functionName, String keyName, String keyValue) {
    return addFunctionKeyAsync(functionName, keyName, keyValue).block();
}

/**
 * Adds a function key. With an explicit value the key is PUT as-is; with a null value the
 * functions host generates the key value server-side.
 */
@Override
public Mono<NameValuePair> addFunctionKeyAsync(String functionName, String keyName, String keyValue) {
    if (keyValue != null) {
        return functionService
            .addFunctionKey(
                functionServiceHost,
                functionName,
                keyName,
                new NameValuePair().withName(keyName).withValue(keyValue));
    } else {
        return functionService.generateFunctionKey(functionServiceHost, functionName, keyName);
    }
}

/** Blocking wrapper over {@link #removeFunctionKeyAsync(String, String)}. */
@Override
public void removeFunctionKey(String functionName, String keyName) {
    removeFunctionKeyAsync(functionName, keyName).block();
}

/** Deletes a function key through the functions admin endpoint. */
@Override
public Mono<Void> removeFunctionKeyAsync(String functionName, String keyName) {
    return functionService.deleteFunctionKey(functionServiceHost, functionName, keyName);
}

/** Blocking wrapper over {@link #triggerFunctionAsync(String, Object)}. */
@Override
public void triggerFunction(String functionName, Object payload) {
    triggerFunctionAsync(functionName, payload).block();
}

/** Invokes a function with the given payload through the functions admin endpoint. */
@Override
public Mono<Void> triggerFunctionAsync(String functionName, Object payload) {
    return functionService.triggerFunction(functionServiceHost, functionName, payload);
}

/** Blocking wrapper over {@link #syncTriggersAsync()}. */
@Override
public void syncTriggers() {
    syncTriggersAsync().block();
}

/** Syncs function triggers; a ManagementException carrying HTTP 200 is treated as success. */
@Override
public Mono<Void> syncTriggersAsync() {
    return manager()
        .serviceClient()
        .getWebApps()
        .syncFunctionTriggersAsync(resourceGroupName(), name())
        .onErrorResume(
            throwable -> {
                // The service can surface a 200 through the exception path; swallow only that case.
                if (throwable instanceof ManagementException
                    && ((ManagementException) throwable).getResponse().getStatusCode() == 200) {
                    return Mono.empty();
                } else {
                    return Mono.error(throwable);
                }
            });
}

/** Returns the Azure Container Apps environment id hosting this app, if any. */
@Override
public String managedEnvironmentId() {
    return innerModel().managedEnvironmentId();
}

/** Scale-out limit read from the cached site config; null when the config is not loaded. */
@Override
public Integer maxReplicas() {
    if (this.siteConfig == null) {
        return null;
    }
    return this.siteConfig.functionAppScaleLimit();
}

/** Minimum instance count read from the cached site config; null when the config is not loaded. */
@Override
public Integer minReplicas() {
    if (this.siteConfig == null) {
        return null;
    }
    return this.siteConfig.minimumElasticInstanceCount();
}
/**
 * Streams application logs. The functions host is pinged and its status checked first —
 * presumably to ensure the host is started before tailing logs (NOTE(review): confirm intent).
 */
@Override
public Flux<String> streamApplicationLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamApplicationLogsAsync());
}

/** Streams HTTP logs, after the same host ping/status check as streamApplicationLogsAsync. */
@Override
public Flux<String> streamHttpLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamHttpLogsAsync());
}

/** Streams trace logs, after the same host ping/status check as streamApplicationLogsAsync. */
@Override
public Flux<String> streamTraceLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamTraceLogsAsync());
}

/** Streams deployment logs, after the same host ping/status check as streamApplicationLogsAsync. */
@Override
public Flux<String> streamDeploymentLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamDeploymentLogsAsync());
}

/** Streams all logs, after the same host ping/status check as streamApplicationLogsAsync. */
@Override
public Flux<String> streamAllLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamAllLogsAsync());
}

/** Deploys a ZIP file via Kudu zipdeploy; IO failures surface as an error Mono. */
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
    try {
        return kuduClient.zipDeployAsync(zipFile);
    } catch (IOException e) {
        return Mono.error(e);
    }
}

/** Blocking wrapper over {@link #zipDeployAsync(File)}. */
@Override
public void zipDeploy(File zipFile) {
    zipDeployAsync(zipFile).block();
}

/** Deploys a ZIP stream of known length via Kudu zipdeploy. */
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
    return kuduClient.zipDeployAsync(zipFile, length);
}

/** Blocking wrapper over {@link #zipDeployAsync(InputStream, long)}. */
@Override
public void zipDeploy(InputStream zipFile, long length) {
    zipDeployAsync(zipFile, length).block();
}
// ACA-hosted apps need the payload adapted before the resource-group create/update runs.
@Override
public void beforeGroupCreateOrUpdate() {
    if (isFunctionAppOnACA()) {
        adaptForFunctionAppOnACA();
    }
    super.beforeGroupCreateOrUpdate();
}

/**
 * Reshapes the request for a Function App on Azure Container Apps: the "reserved" flag is
 * cleared and the tracked site config (with pending app-setting adds/removes applied) is
 * copied into an inline SiteConfigInner on the site payload.
 */
private void adaptForFunctionAppOnACA() {
    this.innerModel().withReserved(null);
    if (this.siteConfig != null) {
        SiteConfigInner siteConfigInner = new SiteConfigInner();
        siteConfigInner.withLinuxFxVersion(this.siteConfig.linuxFxVersion());
        siteConfigInner.withMinimumElasticInstanceCount(this.siteConfig.minimumElasticInstanceCount());
        siteConfigInner.withFunctionAppScaleLimit(this.siteConfig.functionAppScaleLimit());
        siteConfigInner.withAppSettings(this.siteConfig.appSettings() == null ? new ArrayList<>() : this.siteConfig.appSettings());
        if (!appSettingsToAdd.isEmpty() || !appSettingsToRemove.isEmpty()) {
            // Removals before additions, so a re-added key keeps its new value.
            for (String settingToRemove : appSettingsToRemove) {
                siteConfigInner.appSettings().removeIf(kvPair -> Objects.equals(settingToRemove, kvPair.name()));
            }
            for (Map.Entry<String, String> entry : appSettingsToAdd.entrySet()) {
                siteConfigInner.appSettings().add(new NameValuePair().withName(entry.getKey()).withValue(entry.getValue()));
            }
        }
        this.innerModel().withSiteConfig(siteConfigInner);
    }
}

/**
 * Creates the function app, defaulting a Windows consumption plan (when no plan was chosen
 * and the app is not on ACA) and a new STANDARD_LRS storage account (when none was chosen).
 */
@Override
public Mono<FunctionApp> createAsync() {
    if (this.isInCreateMode()) {
        if (innerModel().serverFarmId() == null && !isFunctionAppOnACA()) {
            withNewConsumptionPlan();
        }
        if (currentStorageAccount == null && storageAccountToSet == null && storageAccountCreatable == null) {
            withNewStorageAccount(
                this.manager().resourceManager().internalContext()
                    .randomResourceName(getStorageAccountName(), 20),
                StorageAccountSkuType.STANDARD_LRS);
        }
    }
    return super.createAsync();
}

// Initialize the functions admin proxy once the app has been created successfully.
@Override
public Mono<Void> afterPostRunAsync(final boolean isGroupFaulted) {
    if (!isGroupFaulted) {
        initializeFunctionService();
    }
    return super.afterPostRunAsync(isGroupFaulted);
}
/**
 * Targets the app at an Azure Container Apps environment. A non-empty id switches the site
 * kind to the ACA flavor and ensures a site config exists for replica settings.
 */
@Override
public FunctionAppImpl withManagedEnvironmentId(String managedEnvironmentId) {
    this.innerModel().withManagedEnvironmentId(managedEnvironmentId);
    if (!CoreUtils.isNullOrEmpty(managedEnvironmentId)) {
        this.innerModel().withKind("functionapp,linux,container,azurecontainerapps");
        if (this.siteConfig == null) {
            this.siteConfig = new SiteConfigResourceInner().withAppSettings(new ArrayList<>());
        }
    }
    return this;
}

/** Sets the app's scale-out limit on the tracked site config. */
@Override
public FunctionAppImpl withMaxReplicas(int maxReplicas) {
    if (siteConfig == null) {
        siteConfig = new SiteConfigResourceInner();
    }
    siteConfig.withFunctionAppScaleLimit(maxReplicas);
    return this;
}

/** Sets the app's minimum instance count on the tracked site config. */
@Override
public FunctionAppImpl withMinReplicas(int minReplicas) {
    if (siteConfig == null) {
        siteConfig = new SiteConfigResourceInner();
    }
    siteConfig.withMinimumElasticInstanceCount(minReplicas);
    return this;
}

/**
 * For apps on ACA, app settings are read straight from the list-app-settings API; the third
 * AppSettingImpl argument is hard-coded false (presumably "not slot-sticky" — TODO confirm).
 * Otherwise the base implementation is used.
 */
@Override
public Mono<Map<String, AppSetting>> getAppSettingsAsync() {
    if (isFunctionAppOnACA()) {
        return listAppSettings()
            .map(
                appSettingsInner ->
                    appSettingsInner
                        .properties()
                        .entrySet()
                        .stream()
                        .collect(
                            Collectors
                                .toMap(
                                    Map.Entry::getKey,
                                    entry ->
                                        new AppSettingImpl(
                                            entry.getKey(),
                                            entry.getValue(),
                                            false))));
    } else {
        return super.getAppSettingsAsync();
    }
}

/**
 * Whether this Function App is on Azure Container Apps environment.
 *
 * @return whether this Function App is on Azure Container Apps environment
 */
boolean isFunctionAppOnACA() {
    return isFunctionAppOnACA(innerModel());
}

/** Static variant; true when the site carries a managed environment id. */
static boolean isFunctionAppOnACA(SiteInner siteInner) {
    return siteInner != null && !CoreUtils.isNullOrEmpty(siteInner.managedEnvironmentId());
}

// The four overrides below wrap ACA-targeted operations in the shared backoff retry.
@Override
Mono<SiteInner> updateInner(SitePatchResourceInner siteUpdate) {
    Mono<SiteInner> updateInner = super.updateInner(siteUpdate);
    if (isFunctionAppOnACA()) {
        return RetryUtils.backoffRetryForFunctionAppAca(updateInner);
    } else {
        return updateInner;
    }
}

@Override
Mono<SiteConfigResourceInner> createOrUpdateSiteConfig(SiteConfigResourceInner siteConfig) {
    Mono<SiteConfigResourceInner> createOrUpdateSiteConfig = super.createOrUpdateSiteConfig(siteConfig);
    if (isFunctionAppOnACA()) {
        return RetryUtils.backoffRetryForFunctionAppAca(createOrUpdateSiteConfig);
    } else {
        return createOrUpdateSiteConfig;
    }
}

@Override
Mono<StringDictionaryInner> updateAppSettings(StringDictionaryInner inner) {
    Mono<StringDictionaryInner> updateAppSettings = super.updateAppSettings(inner);
    if (isFunctionAppOnACA()) {
        return RetryUtils.backoffRetryForFunctionAppAca(updateAppSettings);
    } else {
        return updateAppSettings;
    }
}

@Override
Mono<ConnectionStringDictionaryInner> updateConnectionStrings(ConnectionStringDictionaryInner inner) {
    Mono<ConnectionStringDictionaryInner> updateConnectionStrings = super.updateConnectionStrings(inner);
    if (isFunctionAppOnACA()) {
        return RetryUtils.backoffRetryForFunctionAppAca(updateConnectionStrings);
    } else {
        return updateConnectionStrings;
    }
}
/** Blocking wrapper over {@link #deployAsync(DeployType, File)}. */
@Override
public void deploy(DeployType type, File file) {
    deployAsync(type, file).block();
}

/** Deploys a file with default options. */
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
    return deployAsync(type, file, null);
}

/** Blocking wrapper over {@link #deployAsync(DeployType, File, DeployOptions)}. */
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
    // Bug fix: forward the caller-supplied options instead of discarding them.
    deployAsync(type, file, deployOptions).block();
}

/**
 * Deploys a file: pushes the package and then polls Kudu until the deployment completes.
 * (Restored overload — the original had a duplicated {@code @Override} where this method,
 * which the two-arg overload above calls, had been lost.)
 */
@Override
public Mono<Void> deployAsync(DeployType type, File file, DeployOptions deployOptions) {
    return this.pushDeployAsync(type, file, deployOptions)
        .flatMap(result -> kuduClient.pollDeploymentStatus(result, manager().serviceClient().getDefaultPollInterval()));
}

/** Blocking wrapper over {@link #deployAsync(DeployType, InputStream, long)}. */
@Override
public void deploy(DeployType type, InputStream file, long length) {
    deployAsync(type, file, length).block();
}

/** Deploys a stream of known length with default options. */
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
    return deployAsync(type, file, length, null);
}

/** Blocking wrapper over {@link #deployAsync(DeployType, InputStream, long, DeployOptions)}. */
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
    // Bug fix: forward the caller-supplied options instead of discarding them.
    deployAsync(type, file, length, deployOptions).block();
}

/** Deploys a stream: pushes the package and polls Kudu until the deployment completes. */
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
    return this.pushDeployAsync(type, file, length, deployOptions)
        .flatMap(result -> kuduClient.pollDeploymentStatus(result, manager().serviceClient().getDefaultPollInterval()));
}
/** Blocking wrapper over {@link #pushDeployAsync(DeployType, File, DeployOptions)}. */
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
    return pushDeployAsync(type, file, deployOptions).block();
}

/**
 * Pushes a ZIP package without waiting for completion. Flex Consumption plans use their own
 * Kudu deployment call; other plans use zipdeploy and report the "latest" deployment id.
 * Only {@code DeployType.ZIP} is supported.
 */
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
    if (type != DeployType.ZIP) {
        return Mono.error(new IllegalArgumentException("Deployment to Function App supports ZIP package."));
    }
    return getAppServicePlanIsFlexConsumptionMono().flatMap(appServiceIsFlexConsumptionPlan -> {
        try {
            if (appServiceIsFlexConsumptionPlan) {
                return kuduClient.pushDeployFlexConsumptionAsync(file);
            } else {
                return kuduClient.pushZipDeployAsync(file)
                    .then(Mono.just(new KuduDeploymentResult("latest")));
            }
        } catch (IOException e) {
            return Mono.error(e);
        }
    });
}

// Stream-based counterpart of the File overload above; deployOptions is currently unused here.
private Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, InputStream file, long length,
    DeployOptions deployOptions) {
    if (type != DeployType.ZIP) {
        return Mono.error(new IllegalArgumentException("Deployment to Function App supports ZIP package."));
    }
    return getAppServicePlanIsFlexConsumptionMono().flatMap(appServiceIsFlexConsumptionPlan -> {
        try {
            if (appServiceIsFlexConsumptionPlan) {
                return kuduClient.pushDeployFlexConsumptionAsync(file, length);
            } else {
                return kuduClient.pushZipDeployAsync(file, length)
                    .then(Mono.just(new KuduDeploymentResult("latest")));
            }
        } catch (IOException e) {
            return Mono.error(e);
        }
    });
}

/**
 * Resolves whether the app's plan is a Flex Consumption plan, caching the answer in the
 * {@code appServicePlanIsFlexConsumption} field so later calls skip the plan lookup.
 */
private Mono<Boolean> getAppServicePlanIsFlexConsumptionMono() {
    Mono<Boolean> updateAppServicePlan = Mono.justOrEmpty(appServicePlanIsFlexConsumption);
    if (appServicePlanIsFlexConsumption == null) {
        updateAppServicePlan = Mono.defer(
            () -> manager().appServicePlans()
                .getByIdAsync(this.appServicePlanId())
                .map(appServicePlan -> {
                    appServicePlanIsFlexConsumption = "FlexConsumption".equals(appServicePlan.pricingTier().toSkuDescription().tier());
                    return appServicePlanIsFlexConsumption;
                }));
    }
    return updateAppServicePlan;
}
/** Blocking wrapper over {@link #getDeploymentStatusAsync(String)}. */
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
    return getDeploymentStatusAsync(deploymentId).block();
}

/**
 * Fetches a deployment's status. The raw response body is deserialized manually with the
 * management serializer, and the buffered response is always closed via doFinally.
 */
@Override
public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
    SerializerAdapter serializerAdapter = SerializerFactory.createDefaultManagementSerializerAdapter();
    return this.manager().serviceClient().getWebApps()
        .getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId)
        .flatMap(fluxResponse -> {
            HttpResponse response = new HttpFluxBBResponse(fluxResponse);
            return response.getBodyAsString()
                .flatMap(bodyString -> {
                    CsmDeploymentStatus status;
                    try {
                        status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
                    } catch (IOException e) {
                        return Mono.error(new ManagementException("Deserialize failed for response body.", response));
                    }
                    return Mono.justOrEmpty(status);
                }).doFinally(ignored -> response.close());
        });
}
/**
 * REST proxy contract for the function app's own admin endpoints (paths are relative to the
 * app host); authenticated by FunctionAuthenticationPolicy rather than ARM credentials.
 */
@Host("{$host}")
@ServiceInterface(name = "FunctionService")
private interface FunctionService {
    /** Lists the keys of a single function. */
    @Headers({
        "Accept: application/json",
        "Content-Type: application/json; charset=utf-8"
    })
    @Get("admin/functions/{name}/keys")
    Mono<FunctionKeyListResult> listFunctionKeys(
        @HostParam("$host") String host, @PathParam("name") String functionName);

    /** Adds (or replaces) a function key with a caller-provided value. */
    @Headers({
        "Accept: application/json",
        "Content-Type: application/json; charset=utf-8"
    })
    @Put("admin/functions/{name}/keys/{keyName}")
    Mono<NameValuePair> addFunctionKey(
        @HostParam("$host") String host,
        @PathParam("name") String functionName,
        @PathParam("keyName") String keyName,
        @BodyParam("application/json") NameValuePair key);

    /** Creates a function key whose value is generated server-side. */
    @Headers({
        "Accept: application/json",
        "Content-Type: application/json; charset=utf-8"
    })
    @Post("admin/functions/{name}/keys/{keyName}")
    Mono<NameValuePair> generateFunctionKey(
        @HostParam("$host") String host,
        @PathParam("name") String functionName,
        @PathParam("keyName") String keyName);

    /** Deletes a function key. */
    @Headers({
        "Content-Type: application/json; charset=utf-8"
    })
    @Delete("admin/functions/{name}/keys/{keyName}")
    Mono<Void> deleteFunctionKey(
        @HostParam("$host") String host,
        @PathParam("name") String functionName,
        @PathParam("keyName") String keyName);

    /** Pings the functions host. */
    @Headers({
        "Content-Type: application/json; charset=utf-8"
    })
    @Post("admin/host/ping")
    Mono<Void> ping(@HostParam("$host") String host);

    /** Queries the functions host status; the response body is discarded. */
    @Headers({
        "Content-Type: application/json; charset=utf-8"
    })
    @Get("admin/host/status")
    Mono<Void> getHostStatus(@HostParam("$host") String host);

    /** Triggers a function with an arbitrary JSON payload. */
    @Headers({
        "Content-Type: application/json; charset=utf-8"
    })
    @Post("admin/functions/{name}")
    Mono<Void> triggerFunction(
        @HostParam("$host") String host,
        @PathParam("name") String functionName,
        @BodyParam("application/json") Object payload);
}
/** JSON wire shape of the admin "list function keys" response: {"keys": [{name, value}, ...]}. */
private static class FunctionKeyListResult implements JsonSerializable<FunctionKeyListResult> {
    // Key name/value pairs; stays null when the payload has no "keys" field.
    private List<NameValuePair> keys;

    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        return jsonWriter
            .writeStartObject()
            .writeArrayField("keys", keys, JsonWriter::writeJson)
            .writeEndObject();
    }

    /** Deserializes the result, ignoring unknown fields. */
    public static FunctionKeyListResult fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            FunctionKeyListResult result = new FunctionKeyListResult();
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();
                if ("keys".equals(fieldName)) {
                    List<NameValuePair> keys = reader.readArray(reader1 ->
                        reader1.readObject(NameValuePair::fromJson));
                    result.keys = keys;
                } else {
                    reader.skipChildren();
                }
            }
            return result;
        });
    }
}
/** Derives a storage-account base name from the app name: alphanumerics only. */
private String getStorageAccountName() {
    StringBuilder sanitized = new StringBuilder();
    for (char c : name().toCharArray()) {
        boolean alphanumeric =
            (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9');
        if (alphanumeric) {
            sanitized.append(c);
        }
    }
    return sanitized.toString();
}
} | class FunctionAppImpl
extends AppServiceBaseImpl<
FunctionApp, FunctionAppImpl, FunctionApp.DefinitionStages.WithCreate, FunctionApp.Update>
implements FunctionApp,
FunctionApp.Definition,
FunctionApp.DefinitionStages.NewAppServicePlanWithGroup,
FunctionApp.DefinitionStages.ExistingLinuxPlanWithGroup,
FunctionApp.Update {
private static final ClientLogger LOGGER = new ClientLogger(FunctionAppImpl.class);
// App settings controlling the file-share-backed content store (consumption/premium plans).
private static final String SETTING_WEBSITE_CONTENTAZUREFILECONNECTIONSTRING =
    "WEBSITE_CONTENTAZUREFILECONNECTIONSTRING";
private static final String SETTING_WEBSITE_CONTENTSHARE = "WEBSITE_CONTENTSHARE";
// App settings carrying the AzureWebJobs storage/dashboard connection strings.
private static final String SETTING_WEB_JOBS_STORAGE = "AzureWebJobsStorage";
private static final String SETTING_WEB_JOBS_DASHBOARD = "AzureWebJobsDashboard";
// Storage account to be created as part of this app's create flow, if requested.
private Creatable<StorageAccount> storageAccountCreatable;
// Storage account explicitly chosen (or resolved from the creatable) to back this app.
private StorageAccount storageAccountToSet;
// Storage account currently associated with the app, once apply has completed.
private StorageAccount currentStorageAccount;
// Proxy for the functions admin endpoints (keys, trigger, ping, status).
private FunctionService functionService;
// Lazily created deployment-slots collection.
private FunctionDeploymentSlots deploymentSlots;
// Base URL for functionService calls, derived from the app's default hostname.
private String functionServiceHost;
// Lazily resolved: whether the app's plan is a FlexConsumption plan (null = not yet known).
private Boolean appServicePlanIsFlexConsumption;

/**
 * Creates the implementation wrapper. For an existing app (non-create mode) the functions
 * admin proxy is initialized immediately; in create mode it is deferred to afterPostRunAsync.
 */
FunctionAppImpl(
    final String name,
    SiteInner innerObject,
    SiteConfigResourceInner siteConfig,
    SiteLogsConfigInner logConfig,
    AppServiceManager manager) {
    super(name, innerObject, siteConfig, logConfig, manager);
    if (!isInCreateMode()) {
        initializeFunctionService();
    }
}
/**
 * Builds the REST proxy for the function app's admin endpoints. Idempotent: does nothing
 * once {@code functionService} is set.
 */
private void initializeFunctionService() {
    if (functionService == null) {
        UrlBuilder urlBuilder = UrlBuilder.parse(this.defaultHostname());
        String baseUrl;
        // The default hostname may lack a scheme; force https before building the URL.
        if (urlBuilder.getScheme() == null) {
            urlBuilder.setScheme("https");
        }
        try {
            baseUrl = urlBuilder.toUrl().toString();
        } catch (MalformedURLException e) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(e));
        }
        // Reuse the manager's pipeline policies, dropping the ARM-specific authentication
        // policies that do not apply to the functions admin endpoint.
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0, count = manager().httpPipeline().getPolicyCount(); i < count; ++i) {
            HttpPipelinePolicy policy = manager().httpPipeline().getPolicy(i);
            if (!(policy instanceof AuthenticationPolicy)
                && !(policy instanceof ProviderRegistrationPolicy)
                && !(policy instanceof AuxiliaryAuthenticationPolicy)) {
                policies.add(policy);
            }
        }
        // Authenticate with the function app's own key-based policy instead.
        policies.add(new FunctionAuthenticationPolicy(this));
        HttpPipeline httpPipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(manager().httpPipeline().getHttpClient())
            .build();
        functionServiceHost = baseUrl;
        functionService =
            RestProxy.create(FunctionService.class, httpPipeline,
                SerializerFactory.createDefaultManagementSerializerAdapter());
    }
}
@Override
public void setInner(SiteInner innerObject) {
    // No extra bookkeeping beyond the base implementation.
    super.setInner(innerObject);
}

/** Returns the (lazily created) deployment slots collection for this app. */
@Override
public FunctionDeploymentSlots deploymentSlots() {
    if (deploymentSlots == null) {
        deploymentSlots = new FunctionDeploymentSlotsImpl(this);
    }
    return deploymentSlots;
}

/** Creates a new Windows consumption (Dynamic/Y1) plan for this app. */
@Override
public FunctionAppImpl withNewConsumptionPlan() {
    return withNewAppServicePlan(OperatingSystem.WINDOWS, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}

/** Creates a new named Windows consumption (Dynamic/Y1) plan for this app. */
@Override
public FunctionAppImpl withNewConsumptionPlan(String appServicePlanName) {
    return withNewAppServicePlan(
        appServicePlanName, OperatingSystem.WINDOWS, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}

/** Sets the FUNCTIONS_WORKER_RUNTIME app setting. */
@Override
public FunctionAppImpl withRuntime(String runtime) {
    return withAppSetting(SETTING_FUNCTIONS_WORKER_RUNTIME, runtime);
}

/** Sets FUNCTIONS_EXTENSION_VERSION, normalizing the value to the "~N" form. */
@Override
public FunctionAppImpl withRuntimeVersion(String version) {
    return withAppSetting(SETTING_FUNCTIONS_EXTENSION_VERSION, version.startsWith("~") ? version : "~" + version);
}

@Override
public FunctionAppImpl withLatestRuntimeVersion() {
    return withRuntimeVersion("latest");
}

// For apps on Azure Container Apps the site payload is submitted directly via the inner
// create/update client, bypassing the base submit flow.
@Override
Mono<SiteInner> submitSite(SiteInner site) {
    if (isFunctionAppOnACA()) {
        return createOrUpdateInner(site);
    } else {
        return super.submitSite(site);
    }
}

@Override
Mono<SiteInner> submitSite(SitePatchResourceInner siteUpdate) {
    if (isFunctionAppOnACA()) {
        return updateInner(siteUpdate);
    } else {
        return super.submitSite(siteUpdate);
    }
}
/**
 * Submits app settings, first wiring the AzureWebJobs (and, off ACA, dashboard/content)
 * settings to the chosen storage account. Settings the user already added/removed in this
 * update are left untouched.
 */
@Override
Mono<Indexable> submitAppSettings() {
    // Resolve the storage account created earlier in this apply run, if any.
    if (storageAccountCreatable != null && this.taskResult(storageAccountCreatable.key()) != null) {
        storageAccountToSet = this.taskResult(storageAccountCreatable.key());
    }
    if (storageAccountToSet == null) {
        return super.submitAppSettings();
    } else {
        return storageAccountToSet
            .getKeysAsync()
            .flatMap(storageAccountKeys -> {
                // Build the connection string from the account's first key.
                StorageAccountKey key = storageAccountKeys.get(0);
                String connectionString = ResourceManagerUtils
                    .getStorageConnectionString(storageAccountToSet.name(), key.value(),
                        manager().environment());
                addAppSettingIfNotModified(SETTING_WEB_JOBS_STORAGE, connectionString);
                if (!isFunctionAppOnACA()) {
                    addAppSettingIfNotModified(SETTING_WEB_JOBS_DASHBOARD, connectionString);
                    return this.manager().appServicePlans().getByIdAsync(this.appServicePlanId())
                        .flatMap(appServicePlan -> {
                            // Consumption/premium plans keep content on a file share; point
                            // the content settings at the same account, random share name.
                            if (appServicePlan == null
                                || isConsumptionOrPremiumAppServicePlan(appServicePlan.pricingTier())) {
                                addAppSettingIfNotModified(
                                    SETTING_WEBSITE_CONTENTAZUREFILECONNECTIONSTRING, connectionString);
                                addAppSettingIfNotModified(
                                    SETTING_WEBSITE_CONTENTSHARE,
                                    this.manager().resourceManager().internalContext()
                                        .randomResourceName(name(), 32));
                            }
                            return FunctionAppImpl.super.submitAppSettings();
                        });
                } else {
                    return FunctionAppImpl.super.submitAppSettings();
                }
            }).then(
                Mono
                    .fromCallable(
                        () -> {
                            // Commit: remember the resolved account, clear the pending state.
                            currentStorageAccount = storageAccountToSet;
                            storageAccountToSet = null;
                            storageAccountCreatable = null;
                            return this;
                        }));
    }
}
/** Function Apps on ACA are always Linux; otherwise derive from the ARM "reserved" flag. */
@Override
public OperatingSystem operatingSystem() {
    if (isFunctionAppOnACA()) {
        return OperatingSystem.LINUX;
    }
    return (innerModel().reserved() == null || !innerModel().reserved())
        ? OperatingSystem.WINDOWS : OperatingSystem.LINUX;
}

// Adds the setting only when the user has not already added/removed it in this update.
private void addAppSettingIfNotModified(String key, String value) {
    if (!appSettingModified(key)) {
        withAppSetting(key, value);
    }
}

// True when the key is among the pending app-setting additions or removals.
private boolean appSettingModified(String key) {
    return (appSettingsToAdd != null && appSettingsToAdd.containsKey(key))
        || (appSettingsToRemove != null && appSettingsToRemove.contains(key));
}
/**
 * True when the pricing tier is a consumption (Dynamic) or Elastic Premium tier.
 * A missing tier is treated as consumption/premium (the conservative default).
 */
private static boolean isConsumptionOrPremiumAppServicePlan(PricingTier pricingTier) {
    if (pricingTier == null || pricingTier.toSkuDescription() == null) {
        return true;
    }
    String tier = pricingTier.toSkuDescription().tier();
    return SkuName.DYNAMIC.toString().equalsIgnoreCase(tier)
        || SkuName.ELASTIC_PREMIUM.toString().equalsIgnoreCase(tier);
}
// Plan setters also auto-configure "always on" based on the plan's pricing tier.
@Override
FunctionAppImpl withNewAppServicePlan(OperatingSystem operatingSystem, PricingTier pricingTier) {
    return super.withNewAppServicePlan(operatingSystem, pricingTier).autoSetAlwaysOn(pricingTier);
}

@Override
FunctionAppImpl withNewAppServicePlan(
    String appServicePlan, OperatingSystem operatingSystem, PricingTier pricingTier) {
    return super.withNewAppServicePlan(appServicePlan, operatingSystem, pricingTier).autoSetAlwaysOn(pricingTier);
}

@Override
public FunctionAppImpl withExistingAppServicePlan(AppServicePlan appServicePlan) {
    super.withExistingAppServicePlan(appServicePlan);
    return autoSetAlwaysOn(appServicePlan.pricingTier());
}
/**
 * Enables "always on" only for tiers that support it; Free/Shared/Dynamic and the elastic
 * tiers get it disabled.
 */
private FunctionAppImpl autoSetAlwaysOn(PricingTier pricingTier) {
    String tier = pricingTier.toSkuDescription().tier();
    boolean alwaysOnUnsupported =
        tier.equalsIgnoreCase(SkuName.FREE.toString())
            || tier.equalsIgnoreCase(SkuName.SHARED.toString())
            || tier.equalsIgnoreCase(SkuName.DYNAMIC.toString())
            || tier.equalsIgnoreCase(SkuName.ELASTIC_PREMIUM.toString())
            || tier.equalsIgnoreCase(SkuName.ELASTIC_ISOLATED.toString());
    return withWebAppAlwaysOn(!alwaysOnUnsupported);
}
/** Creates a new general-purpose-v2 storage account in this app's region to back the app. */
@Override
public FunctionAppImpl withNewStorageAccount(String name, StorageAccountSkuType sku) {
    StorageAccount.DefinitionStages.WithGroup storageDefine =
        manager().storageManager().storageAccounts().define(name).withRegion(regionName());
    if (super.creatableGroup != null && isInCreateMode()) {
        // App is being created together with a new resource group; co-locate the account there.
        storageAccountCreatable =
            storageDefine
                .withNewResourceGroup(super.creatableGroup)
                .withGeneralPurposeAccountKindV2()
                .withSku(sku);
    } else {
        storageAccountCreatable =
            storageDefine
                .withExistingResourceGroup(resourceGroupName())
                .withGeneralPurposeAccountKindV2()
                .withSku(sku);
    }
    this.addDependency(storageAccountCreatable);
    return this;
}

/** Uses a caller-supplied creatable for the backing storage account. */
@Override
public FunctionAppImpl withNewStorageAccount(Creatable<StorageAccount> storageAccount) {
    storageAccountCreatable = storageAccount;
    this.addDependency(storageAccountCreatable);
    return this;
}

/** Uses an existing storage account to back the app. */
@Override
public FunctionAppImpl withExistingStorageAccount(StorageAccount storageAccount) {
    this.storageAccountToSet = storageAccount;
    return this;
}

/** Sets the daily memory-time quota; 0 disables the quota. */
@Override
public FunctionAppImpl withDailyUsageQuota(int quota) {
    innerModel().withDailyMemoryTimeQuota(quota);
    return this;
}

@Override
public FunctionAppImpl withoutDailyUsageQuota() {
    return withDailyUsageQuota(0);
}

/** Creates a new Linux consumption (Dynamic/Y1) plan for this app. */
@Override
public FunctionAppImpl withNewLinuxConsumptionPlan() {
    return withNewAppServicePlan(OperatingSystem.LINUX, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}

/** Creates a new named Linux consumption (Dynamic/Y1) plan for this app. */
@Override
public FunctionAppImpl withNewLinuxConsumptionPlan(String appServicePlanName) {
    return withNewAppServicePlan(
        appServicePlanName, OperatingSystem.LINUX, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}

@Override
public FunctionAppImpl withNewLinuxAppServicePlan(PricingTier pricingTier) {
    return super.withNewAppServicePlan(OperatingSystem.LINUX, pricingTier);
}

@Override
public FunctionAppImpl withNewLinuxAppServicePlan(String appServicePlanName, PricingTier pricingTier) {
    return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.LINUX, pricingTier);
}

@Override
public FunctionAppImpl withNewLinuxAppServicePlan(Creatable<AppServicePlan> appServicePlanCreatable) {
    super.withNewAppServicePlan(appServicePlanCreatable);
    // Always-on can only be auto-configured when the creatable also exposes the pricing tier.
    if (appServicePlanCreatable instanceof AppServicePlan) {
        this.autoSetAlwaysOn(((AppServicePlan) appServicePlanCreatable).pricingTier());
    }
    return this;
}

@Override
public FunctionAppImpl withExistingLinuxAppServicePlan(AppServicePlan appServicePlan) {
    return super.withExistingAppServicePlan(appServicePlan).autoSetAlwaysOn(appServicePlan.pricingTier());
}
/** Configures a built-in Linux runtime stack image, clearing any container settings first. */
@Override
public FunctionAppImpl withBuiltInImage(final FunctionRuntimeStack runtimeStack) {
    ensureLinuxPlan();
    cleanUpContainerSettings();
    if (siteConfig == null) {
        siteConfig = new SiteConfigResourceInner();
    }
    withRuntime(runtimeStack.runtime());
    withRuntimeVersion(runtimeStack.version());
    siteConfig.withLinuxFxVersion(runtimeStack.getLinuxFxVersion());
    return this;
}

/** Uses a public Docker Hub image; requires a Linux plan. */
@Override
public FunctionAppImpl withPublicDockerHubImage(String imageAndTag) {
    ensureLinuxPlan();
    return super.withPublicDockerHubImage(imageAndTag);
}
/** Uses a private Docker Hub image; requires a Linux plan. */
@Override
public FunctionAppImpl withPrivateDockerHubImage(String imageAndTag) {
    ensureLinuxPlan();
    // Bug fix: delegate to the private-image flow; the original mistakenly called
    // super.withPublicDockerHubImage, skipping registry-credential configuration.
    return super.withPrivateDockerHubImage(imageAndTag);
}
/**
 * Uses an image from a private registry; requires a Linux plan. For apps on ACA the registry
 * server app setting is additionally derived from the server URL.
 */
@Override
public FunctionAppImpl withPrivateRegistryImage(String imageAndTag, String serverUrl) {
    ensureLinuxPlan();
    super.withPrivateRegistryImage(imageAndTag, serverUrl);
    if (isFunctionAppOnACA()) {
        try {
            URL url = new URL(serverUrl);
            withAppSetting(SETTING_REGISTRY_SERVER, url.getAuthority() + url.getFile());
        } catch (MalformedURLException e) {
            // Best-effort as before (the registry-server app setting is simply not written),
            // but surface the malformed URL instead of silently swallowing it.
            LOGGER.logThrowableAsWarning(e);
        }
    }
    return this;
}
// Clears container-related site config and app settings before a new image is configured.
@Override
protected void cleanUpContainerSettings() {
    if (siteConfig != null && siteConfig.linuxFxVersion() != null) {
        siteConfig.withLinuxFxVersion(null);
    }
    if (siteConfig != null && siteConfig.windowsFxVersion() != null) {
        siteConfig.withWindowsFxVersion(null);
    }
    withoutAppSetting(SETTING_DOCKER_IMAGE);
    withoutAppSetting(SETTING_REGISTRY_SERVER);
    withoutAppSetting(SETTING_REGISTRY_USERNAME);
    withoutAppSetting(SETTING_REGISTRY_PASSWORD);
}

// ARM marks Linux plans with reserved == true; null/false therefore means Windows.
@Override
protected OperatingSystem appServicePlanOperatingSystem(AppServicePlan appServicePlan) {
    return (appServicePlan.innerModel().reserved() == null || !appServicePlan.innerModel().reserved())
        ? OperatingSystem.WINDOWS
        : OperatingSystem.LINUX;
}
/** Returns the storage account associated with this function app (resolved during create/update). */
@Override
public StorageAccount storageAccount() {
    return currentStorageAccount;
}

/** Blocking wrapper over {@link #getMasterKeyAsync()}. */
@Override
public String getMasterKey() {
    return getMasterKeyAsync().block();
}

/** Fetches the function app's master host key via the ARM listHostKeys API. */
@Override
public Mono<String> getMasterKeyAsync() {
    return this.manager().serviceClient().getWebApps().listHostKeysAsync(resourceGroupName(), name())
        .map(HostKeysInner::masterKey);
}

/** Lists the functions deployed in this function app. */
@Override
public PagedIterable<FunctionEnvelope> listFunctions() {
    return this.manager().functionApps().listFunctions(resourceGroupName(), name());
}

/** Blocking wrapper over {@link #listFunctionKeysAsync(String)}. */
@Override
public Map<String, String> listFunctionKeys(String functionName) {
    return listFunctionKeysAsync(functionName).block();
}

/** Lists one function's keys via the functions admin endpoint; an empty map when none. */
@Override
public Mono<Map<String, String>> listFunctionKeysAsync(final String functionName) {
    return functionService
        .listFunctionKeys(functionServiceHost, functionName)
        .map(
            result -> {
                Map<String, String> keys = new HashMap<>();
                if (result.keys != null) {
                    for (NameValuePair pair : result.keys) {
                        keys.put(pair.name(), pair.value());
                    }
                }
                return keys;
            });
}

/** Blocking wrapper over {@link #addFunctionKeyAsync(String, String, String)}. */
@Override
public NameValuePair addFunctionKey(String functionName, String keyName, String keyValue) {
    return addFunctionKeyAsync(functionName, keyName, keyValue).block();
}

/**
 * Adds a function key. With an explicit value the key is PUT as-is; with a null value the
 * functions host generates the key value server-side.
 */
@Override
public Mono<NameValuePair> addFunctionKeyAsync(String functionName, String keyName, String keyValue) {
    if (keyValue != null) {
        return functionService
            .addFunctionKey(
                functionServiceHost,
                functionName,
                keyName,
                new NameValuePair().withName(keyName).withValue(keyValue));
    } else {
        return functionService.generateFunctionKey(functionServiceHost, functionName, keyName);
    }
}

/** Blocking wrapper over {@link #removeFunctionKeyAsync(String, String)}. */
@Override
public void removeFunctionKey(String functionName, String keyName) {
    removeFunctionKeyAsync(functionName, keyName).block();
}

/** Deletes a function key through the functions admin endpoint. */
@Override
public Mono<Void> removeFunctionKeyAsync(String functionName, String keyName) {
    return functionService.deleteFunctionKey(functionServiceHost, functionName, keyName);
}

/** Blocking wrapper over {@link #triggerFunctionAsync(String, Object)}. */
@Override
public void triggerFunction(String functionName, Object payload) {
    triggerFunctionAsync(functionName, payload).block();
}

/** Invokes a function with the given payload through the functions admin endpoint. */
@Override
public Mono<Void> triggerFunctionAsync(String functionName, Object payload) {
    return functionService.triggerFunction(functionServiceHost, functionName, payload);
}

/** Blocking wrapper over {@link #syncTriggersAsync()}. */
@Override
public void syncTriggers() {
    syncTriggersAsync().block();
}

/** Syncs function triggers; a ManagementException carrying HTTP 200 is treated as success. */
@Override
public Mono<Void> syncTriggersAsync() {
    return manager()
        .serviceClient()
        .getWebApps()
        .syncFunctionTriggersAsync(resourceGroupName(), name())
        .onErrorResume(
            throwable -> {
                // The service can surface a 200 through the exception path; swallow only that case.
                if (throwable instanceof ManagementException
                    && ((ManagementException) throwable).getResponse().getStatusCode() == 200) {
                    return Mono.empty();
                } else {
                    return Mono.error(throwable);
                }
            });
}

/** Returns the Azure Container Apps environment id hosting this app, if any. */
@Override
public String managedEnvironmentId() {
    return innerModel().managedEnvironmentId();
}

/** Scale-out limit read from the cached site config; null when the config is not loaded. */
@Override
public Integer maxReplicas() {
    if (this.siteConfig == null) {
        return null;
    }
    return this.siteConfig.functionAppScaleLimit();
}

/** Minimum instance count read from the cached site config; null when the config is not loaded. */
@Override
public Integer minReplicas() {
    if (this.siteConfig == null) {
        return null;
    }
    return this.siteConfig.minimumElasticInstanceCount();
}
/**
 * Streams application logs. The functions host is pinged and its status checked first —
 * presumably to ensure the host is started before tailing logs (NOTE(review): confirm intent).
 */
@Override
public Flux<String> streamApplicationLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamApplicationLogsAsync());
}

/** Streams HTTP logs, after the same host ping/status check as streamApplicationLogsAsync. */
@Override
public Flux<String> streamHttpLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamHttpLogsAsync());
}

/** Streams trace logs, after the same host ping/status check as streamApplicationLogsAsync. */
@Override
public Flux<String> streamTraceLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamTraceLogsAsync());
}

/** Streams deployment logs, after the same host ping/status check as streamApplicationLogsAsync. */
@Override
public Flux<String> streamDeploymentLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamDeploymentLogsAsync());
}

/** Streams all logs, after the same host ping/status check as streamApplicationLogsAsync. */
@Override
public Flux<String> streamAllLogsAsync() {
    return functionService
        .ping(functionServiceHost)
        .then(functionService.getHostStatus(functionServiceHost))
        .thenMany(FunctionAppImpl.super.streamAllLogsAsync());
}

/** Deploys a ZIP file via Kudu zipdeploy; IO failures surface as an error Mono. */
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
    try {
        return kuduClient.zipDeployAsync(zipFile);
    } catch (IOException e) {
        return Mono.error(e);
    }
}

/** Blocking wrapper over {@link #zipDeployAsync(File)}. */
@Override
public void zipDeploy(File zipFile) {
    zipDeployAsync(zipFile).block();
}

/** Deploys a ZIP stream of known length via Kudu zipdeploy. */
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile, long length) {
    return kuduClient.zipDeployAsync(zipFile, length);
}

/** Blocking wrapper over {@link #zipDeployAsync(InputStream, long)}. */
@Override
public void zipDeploy(InputStream zipFile, long length) {
    zipDeployAsync(zipFile, length).block();
}
// ACA-hosted apps need the payload adapted before the resource-group create/update runs.
@Override
public void beforeGroupCreateOrUpdate() {
    if (isFunctionAppOnACA()) {
        adaptForFunctionAppOnACA();
    }
    super.beforeGroupCreateOrUpdate();
}

/**
 * Reshapes the request for a Function App on Azure Container Apps: the "reserved" flag is
 * cleared and the tracked site config (with pending app-setting adds/removes applied) is
 * copied into an inline SiteConfigInner on the site payload.
 */
private void adaptForFunctionAppOnACA() {
    this.innerModel().withReserved(null);
    if (this.siteConfig != null) {
        SiteConfigInner siteConfigInner = new SiteConfigInner();
        siteConfigInner.withLinuxFxVersion(this.siteConfig.linuxFxVersion());
        siteConfigInner.withMinimumElasticInstanceCount(this.siteConfig.minimumElasticInstanceCount());
        siteConfigInner.withFunctionAppScaleLimit(this.siteConfig.functionAppScaleLimit());
        siteConfigInner.withAppSettings(this.siteConfig.appSettings() == null ? new ArrayList<>() : this.siteConfig.appSettings());
        if (!appSettingsToAdd.isEmpty() || !appSettingsToRemove.isEmpty()) {
            // Removals before additions, so a re-added key keeps its new value.
            for (String settingToRemove : appSettingsToRemove) {
                siteConfigInner.appSettings().removeIf(kvPair -> Objects.equals(settingToRemove, kvPair.name()));
            }
            for (Map.Entry<String, String> entry : appSettingsToAdd.entrySet()) {
                siteConfigInner.appSettings().add(new NameValuePair().withName(entry.getKey()).withValue(entry.getValue()));
            }
        }
        this.innerModel().withSiteConfig(siteConfigInner);
    }
}

/**
 * Creates the function app, defaulting a Windows consumption plan (when no plan was chosen
 * and the app is not on ACA) and a new STANDARD_LRS storage account (when none was chosen).
 */
@Override
public Mono<FunctionApp> createAsync() {
    if (this.isInCreateMode()) {
        if (innerModel().serverFarmId() == null && !isFunctionAppOnACA()) {
            withNewConsumptionPlan();
        }
        if (currentStorageAccount == null && storageAccountToSet == null && storageAccountCreatable == null) {
            withNewStorageAccount(
                this.manager().resourceManager().internalContext()
                    .randomResourceName(getStorageAccountName(), 20),
                StorageAccountSkuType.STANDARD_LRS);
        }
    }
    return super.createAsync();
}

// Initialize the functions admin proxy once the app has been created successfully.
@Override
public Mono<Void> afterPostRunAsync(final boolean isGroupFaulted) {
    if (!isGroupFaulted) {
        initializeFunctionService();
    }
    return super.afterPostRunAsync(isGroupFaulted);
}
// Associates the Function App with an Azure Container Apps managed environment.
// A non-empty environment id switches the 'kind' to the ACA flavor and ensures a
// site config object exists to carry ACA-specific settings.
@Override
public FunctionAppImpl withManagedEnvironmentId(String managedEnvironmentId) {
this.innerModel().withManagedEnvironmentId(managedEnvironmentId);
if (!CoreUtils.isNullOrEmpty(managedEnvironmentId)) {
this.innerModel().withKind("functionapp,linux,container,azurecontainerapps");
if (this.siteConfig == null) {
this.siteConfig = new SiteConfigResourceInner().withAppSettings(new ArrayList<>());
}
}
return this;
}
// Max replicas map to the site config's functionAppScaleLimit.
@Override
public FunctionAppImpl withMaxReplicas(int maxReplicas) {
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
siteConfig.withFunctionAppScaleLimit(maxReplicas);
return this;
}
// Min replicas map to the site config's minimumElasticInstanceCount.
@Override
public FunctionAppImpl withMinReplicas(int minReplicas) {
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
siteConfig.withMinimumElasticInstanceCount(minReplicas);
return this;
}
// For apps on ACA, app settings are read straight off the inner settings resource;
// each entry is wrapped as a non-sticky AppSetting (third ctor arg 'false').
// Other hosting shapes defer to the base implementation.
@Override
public Mono<Map<String, AppSetting>> getAppSettingsAsync() {
if (isFunctionAppOnACA()) {
return listAppSettings()
.map(
appSettingsInner ->
appSettingsInner
.properties()
.entrySet()
.stream()
.collect(
Collectors
.toMap(
Map.Entry::getKey,
entry ->
new AppSettingImpl(
entry.getKey(),
entry.getValue(),
false))));
} else {
return super.getAppSettingsAsync();
}
}
/**
 * Whether this Function App is on Azure Container Apps environment.
 *
 * @return whether this Function App is on Azure Container Apps environment
 */
boolean isFunctionAppOnACA() {
return isFunctionAppOnACA(innerModel());
}
// Static form: an app is "on ACA" iff its inner model carries a managed environment id.
static boolean isFunctionAppOnACA(SiteInner siteInner) {
return siteInner != null && !CoreUtils.isNullOrEmpty(siteInner.managedEnvironmentId());
}
// The four control-plane mutations below share one pattern: when the app is hosted on
// Azure Container Apps, the call is wrapped in the ACA backoff-retry helper; otherwise
// the base implementation's Mono is returned untouched.
@Override
Mono<SiteInner> updateInner(SitePatchResourceInner siteUpdate) {
    Mono<SiteInner> call = super.updateInner(siteUpdate);
    return isFunctionAppOnACA() ? RetryUtils.backoffRetryForFunctionAppAca(call) : call;
}

@Override
Mono<SiteConfigResourceInner> createOrUpdateSiteConfig(SiteConfigResourceInner siteConfig) {
    Mono<SiteConfigResourceInner> call = super.createOrUpdateSiteConfig(siteConfig);
    return isFunctionAppOnACA() ? RetryUtils.backoffRetryForFunctionAppAca(call) : call;
}

@Override
Mono<StringDictionaryInner> updateAppSettings(StringDictionaryInner inner) {
    Mono<StringDictionaryInner> call = super.updateAppSettings(inner);
    return isFunctionAppOnACA() ? RetryUtils.backoffRetryForFunctionAppAca(call) : call;
}

@Override
Mono<ConnectionStringDictionaryInner> updateConnectionStrings(ConnectionStringDictionaryInner inner) {
    Mono<ConnectionStringDictionaryInner> call = super.updateConnectionStrings(inner);
    return isFunctionAppOnACA() ? RetryUtils.backoffRetryForFunctionAppAca(call) : call;
}
// Blocking variant of deployAsync(DeployType, File).
@Override
public void deploy(DeployType type, File file) {
deployAsync(type, file).block();
}
// Delegates to the DeployOptions overload with no options.
@Override
public Mono<Void> deployAsync(DeployType type, File file) {
return deployAsync(type, file, null);
}
// Blocking variant of deployAsync(DeployType, File, DeployOptions).
@Override
public void deploy(DeployType type, File file, DeployOptions deployOptions) {
    // Fix: previously passed null here, silently discarding the caller's deployOptions.
    deployAsync(type, file, deployOptions).block();
}
// Blocking deployment from a stream; 'length' is the stream's byte count.
// Fix: the original carried a duplicated @Override annotation, which does not compile
// (Java forbids repeating a non-@Repeatable annotation on one declaration).
@Override
public void deploy(DeployType type, InputStream file, long length) {
    deployAsync(type, file, length).block();
}
// Delegates to the DeployOptions overload with no options.
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length) {
return deployAsync(type, file, length, null);
}
// Blocking variant of deployAsync(DeployType, InputStream, long, DeployOptions).
@Override
public void deploy(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
    // Fix: previously passed null here, silently discarding the caller's deployOptions.
    deployAsync(type, file, length, deployOptions).block();
}
// Pushes the package, then polls Kudu until the deployment reaches a terminal state.
@Override
public Mono<Void> deployAsync(DeployType type, InputStream file, long length, DeployOptions deployOptions) {
    // Fix: forward the caller's deployOptions instead of hard-coding null.
    return this.pushDeployAsync(type, file, length, deployOptions)
        .flatMap(result -> kuduClient.pollDeploymentStatus(result, manager().serviceClient().getDefaultPollInterval()));
}
// Blocking variant of pushDeployAsync(DeployType, File, DeployOptions).
@Override
public KuduDeploymentResult pushDeploy(DeployType type, File file, DeployOptions deployOptions) {
return pushDeployAsync(type, file, deployOptions).block();
}
// Pushes a ZIP package without waiting for completion. Flex Consumption plans use a
// dedicated deployment endpoint; other plans fall back to plain zipdeploy, whose
// deployment id is reported as the literal "latest".
@Override
public Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, File file, DeployOptions deployOptions) {
if (type != DeployType.ZIP) {
return Mono.error(new IllegalArgumentException("Deployment to Function App supports ZIP package."));
}
return getAppServicePlanIsFlexConsumptionMono().flatMap(appServiceIsFlexConsumptionPlan -> {
try {
if (appServiceIsFlexConsumptionPlan) {
return kuduClient.pushDeployFlexConsumptionAsync(file);
} else {
return kuduClient.pushZipDeployAsync(file)
.then(Mono.just(new KuduDeploymentResult("latest")));
}
} catch (IOException e) {
// File-IO failures are routed into the Mono rather than thrown.
return Mono.error(e);
}
});
}
// Stream-based twin of pushDeployAsync(File, ...); only ZIP packages are accepted.
// NOTE(review): the deployOptions parameter is currently unused in this body — presumably
// reserved for future use; confirm before relying on it having any effect.
private Mono<KuduDeploymentResult> pushDeployAsync(DeployType type, InputStream file, long length,
DeployOptions deployOptions) {
if (type != DeployType.ZIP) {
return Mono.error(new IllegalArgumentException("Deployment to Function App supports ZIP package."));
}
return getAppServicePlanIsFlexConsumptionMono().flatMap(appServiceIsFlexConsumptionPlan -> {
try {
if (appServiceIsFlexConsumptionPlan) {
return kuduClient.pushDeployFlexConsumptionAsync(file, length);
} else {
return kuduClient.pushZipDeployAsync(file, length)
.then(Mono.just(new KuduDeploymentResult("latest")));
}
} catch (IOException e) {
return Mono.error(e);
}
});
}
// Resolves (and memoizes in appServicePlanIsFlexConsumption) whether the app's plan is
// a FlexConsumption SKU. The lookup hits the service only on first use.
// NOTE(review): the cached field is read/written without synchronization — looks safe for
// the builder-style single-threaded usage, but confirm if shared across threads.
private Mono<Boolean> getAppServicePlanIsFlexConsumptionMono() {
Mono<Boolean> updateAppServicePlan = Mono.justOrEmpty(appServicePlanIsFlexConsumption);
if (appServicePlanIsFlexConsumption == null) {
updateAppServicePlan = Mono.defer(
() -> manager().appServicePlans()
.getByIdAsync(this.appServicePlanId())
.map(appServicePlan -> {
appServicePlanIsFlexConsumption = "FlexConsumption".equals(appServicePlan.pricingTier().toSkuDescription().tier());
return appServicePlanIsFlexConsumption;
}));
}
return updateAppServicePlan;
}
// Blocking variant of getDeploymentStatusAsync.
@Override
public CsmDeploymentStatus getDeploymentStatus(String deploymentId) {
return getDeploymentStatusAsync(deploymentId).block();
}
// Fetches the production-slot deployment status and deserializes the raw body manually
// (the generated client returns a Flux<ByteBuffer> response here, not a typed model).
@Override
public Mono<CsmDeploymentStatus> getDeploymentStatusAsync(String deploymentId) {
SerializerAdapter serializerAdapter = SerializerFactory.createDefaultManagementSerializerAdapter();
return this.manager().serviceClient().getWebApps()
.getProductionSiteDeploymentStatusWithResponseAsync(this.resourceGroupName(), this.name(), deploymentId)
.flatMap(fluxResponse -> {
HttpResponse response = new HttpFluxBBResponse(fluxResponse);
return response.getBodyAsString()
.flatMap(bodyString -> {
CsmDeploymentStatus status;
try {
status = serializerAdapter.deserialize(bodyString, CsmDeploymentStatus.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.error(new ManagementException("Deserialize failed for response body.", response));
}
return Mono.justOrEmpty(status);
// Always release the buffered response, success or failure.
}).doFinally(ignored -> response.close());
});
}
// Lightweight REST proxy for the Kudu/Functions admin endpoints on the app's SCM host.
// "{$host}" is bound per-call to functionServiceHost.
@Host("{$host}")
@ServiceInterface(name = "FunctionService")
private interface FunctionService {
// Lists all keys for a single function.
@Headers({
"Accept: application/json",
"Content-Type: application/json; charset=utf-8"
})
@Get("admin/functions/{name}/keys")
Mono<FunctionKeyListResult> listFunctionKeys(
@HostParam("$host") String host, @PathParam("name") String functionName);
// Creates/updates a key with a caller-supplied value (PUT is idempotent here).
@Headers({
"Accept: application/json",
"Content-Type: application/json; charset=utf-8"
})
@Put("admin/functions/{name}/keys/{keyName}")
Mono<NameValuePair> addFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName,
@BodyParam("application/json") NameValuePair key);
// Asks the host to generate a key value server-side.
@Headers({
"Accept: application/json",
"Content-Type: application/json; charset=utf-8"
})
@Post("admin/functions/{name}/keys/{keyName}")
Mono<NameValuePair> generateFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName);
@Headers({
"Content-Type: application/json; charset=utf-8"
})
@Delete("admin/functions/{name}/keys/{keyName}")
Mono<Void> deleteFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName);
// Liveness ping used to warm up the host before log streaming.
@Headers({
"Content-Type: application/json; charset=utf-8"
})
@Post("admin/host/ping")
Mono<Void> ping(@HostParam("$host") String host);
@Headers({
"Content-Type: application/json; charset=utf-8"
})
@Get("admin/host/status")
Mono<Void> getHostStatus(@HostParam("$host") String host);
// Manually triggers a function with the given JSON payload.
@Headers({
"Content-Type: application/json; charset=utf-8"
})
@Post("admin/functions/{name}")
Mono<Void> triggerFunction(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@BodyParam("application/json") Object payload);
}
/**
 * Wire model for the admin "list function keys" response: {"keys": [{name, value}, ...]}.
 * Hand-rolled azure-json serialization; unknown fields are skipped on read.
 */
private static class FunctionKeyListResult implements JsonSerializable<FunctionKeyListResult> {
    private List<NameValuePair> keys;

    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeArrayField("keys", keys, JsonWriter::writeJson);
        return jsonWriter.writeEndObject();
    }

    public static FunctionKeyListResult fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            FunctionKeyListResult result = new FunctionKeyListResult();
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();
                if (!"keys".equals(fieldName)) {
                    // Ignore fields we do not model.
                    reader.skipChildren();
                } else {
                    result.keys = reader.readArray(elementReader ->
                        elementReader.readObject(NameValuePair::fromJson));
                }
            }
            return result;
        });
    }
}
// Derives a storage-account-name prefix from the app name by keeping only ASCII
// letters and digits (storage account names forbid everything else).
private String getStorageAccountName() {
    StringBuilder sanitized = new StringBuilder();
    for (char c : name().toCharArray()) {
        boolean keep = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9');
        if (keep) {
            sanitized.append(c);
        }
    }
    return sanitized.toString();
}
} |
+1 | List<CosmosOperationPolicy> getOperationPolicies() {
return requestPolicies;
} | return requestPolicies; | List<CosmosOperationPolicy> getOperationPolicies() {
return UnmodifiableList.unmodifiableList(this.requestPolicies);
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
private Configs configs = new Configs();
// --- Endpoint & credentials: exactly one auth mechanism is kept non-null at a time
// (each credential setter clears the others).
private String serviceEndpoint;
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
// --- Connection & consistency configuration.
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
private List<String> preferredRegions;
// --- Multi-region behavior defaults (all enabled unless overridden).
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
// null means "no override": effective telemetry setting is decided elsewhere.
private Boolean clientTelemetryEnabledOverride = null;
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
// Operation policies registered via addOperationPolicy(); populated in the constructor.
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
 * Instantiates a new Cosmos client builder with defaults: Direct connection mode,
 * empty user-agent suffix, default throttling-retry options and telemetry config,
 * and the non-idempotent write retry policy reset to its default.
 */
public CosmosClientBuilder() {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.userAgentSuffix = "";
this.throttlingRetryOptions = new ThrottlingRetryOptions();
this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
this.resetNonIdempotentWriteRetryPolicy();
// NOTE(review): ArrayList would be the idiomatic choice here (append-and-iterate
// usage); LinkedList kept as-is pending visibility of the file's imports.
this.requestPolicies = new LinkedList<>();
}
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
this.state = metadataCachesSnapshot;
return this;
}
CosmosClientMetadataCachesSnapshot metadataCaches() {
return this.state;
}
/**
* Sets a {@code boolean} flag to reduce the frequency of retries when the client
* strives to meet Session Consistency guarantees for operations
* that can be scoped to a single logical partition. Read your writes for a given logical partition
* should see higher stickiness to regions where the logical partition was written to prior or saw requests in
* thus reducing unnecessary cross-region retries. Reduction of retries would reduce CPU utilization spikes on VMs
* where the client is deployed along with latency savings through reduction of cross-region calls.
*
* <p>
* DISCLAIMER: Setting the {@link CosmosClientBuilder
* will impact all operations executed through this instance of the client provided that
* both the operation and the account support multi-region writes.
* </p>
* <p>
* Setting {@link CosmosClientBuilder
* ensure to maintain a singleton instance of {@link CosmosClient} or {@link CosmosAsyncClient}.
* </p>
*
* Operations supported:
* <ul>
* <li>Read</li>
* <li>Create</li>
* <li>Upsert</li>
* <li>Delete</li>
* <li>Replace</li>
* <li>Batch</li>
* <li>Patch</li>
* <li>Query when scoped to a single logical partition by specifying {@code PartitionKey} with {@link com.azure.cosmos.models.CosmosQueryRequestOptions}</li>
* <li>Change feed when scoped to a single logical partition by using {@code FeedRange.forLogicalPartition()} with {@link com.azure.cosmos.models.CosmosChangeFeedRequestOptions}</li>
* </ul>
*
* <p>
* NOTE: Bulk operations are not supported.
* </p>
*
* @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
* @return current {@link CosmosClientBuilder}
* */
CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
return this;
}
/**
* Gets the {@code boolean} flag {@link CosmosClientBuilder
*
* @return isRegionScopedSessionCapturingEnabled A {@code boolean} flag
* */
boolean isRegionScopedSessionCapturingEnabled() {
return this.isRegionScopedSessionCapturingEnabled;
}
/**
 * Sets an apiType for the builder.
 *
 * @param apiType the API type to associate with the client being built; may be null
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder setApiType(ApiType apiType){
this.apiType = apiType;
return this;
}
/**
 * Adds a policy for modifying request options dynamically.
 *
 * @param policy the policy to add; must not be null
 * @return current cosmosClientBuilder
 * @throws NullPointerException if {@code policy} is null
 */
public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
    // Fail fast on null, consistent with the other builder setters
    // (key(), credential(), permissions() all use Objects.requireNonNull).
    this.requestPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
    return this;
}
/**
 * Returns apiType for the Builder.
 *
 * @return the api type previously set via {@code setApiType}, or null if none was set
 */
ApiType apiType(){ return this.apiType; }
/**
* Session capturing is enabled by default for {@link ConsistencyLevel
* For other consistency levels, it is not needed, unless if you need occasionally send requests with Session
* Consistency while the client is not configured in session.
* <p>
* enabling Session capturing for Session mode has no effect.
* @param sessionCapturingOverrideEnabled session capturing override
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
return this;
}
/**
* Indicates if Session capturing is enabled for non Session modes.
* The default is false.
*
* @return the session capturing override
*/
boolean isSessionCapturingOverrideEnabled() {
return this.sessionCapturingOverrideEnabled;
}
/**
* Enables connections sharing across multiple Cosmos Clients. The default is false.
* <br/>
* <br/>
* <pre>
* {@code
* CosmosAsyncClient client1 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint1)
* .key(key1)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
* CosmosAsyncClient client2 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint2)
* .key(key2)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
*
* }
* </pre>
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout
* config) of the first instantiated client will be used for all other client instances.
* <br/>
* @param connectionSharingAcrossClientsEnabled connection sharing
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
return this;
}
/**
* Indicates whether connection sharing is enabled. The default is false.
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* @return the connection sharing across multiple clients
*/
boolean isConnectionSharingAcrossClientsEnabled() {
return this.connectionSharingAcrossClientsEnabled;
}
/**
 * Gets the token resolver.
 *
 * @return the token resolver, or null if a different auth mechanism is configured
 */
CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
/**
 * Sets the token resolver.
 * <p>
 * Configuring a token resolver clears every other credential form (key, resource token,
 * AzureKeyCredential, permissions, TokenCredential) so that exactly one auth mechanism
 * is active on the builder at a time.
 *
 * @param cosmosAuthorizationTokenResolver the token resolver; must not be null
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder authorizationTokenResolver(
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
"'cosmosAuthorizationTokenResolver' cannot be null.");
this.keyOrResourceToken = null;
this.credential = null;
this.permissions = null;
this.tokenCredential = null;
return this;
}
/**
* Gets the Azure Cosmos DB endpoint the SDK will connect to
*
* @return the endpoint
*/
String getEndpoint() {
return serviceEndpoint;
}
/**
* Sets the Azure Cosmos DB endpoint the SDK will connect to
*
* @param endpoint the service endpoint
* @return current Builder
*/
@Override
public CosmosClientBuilder endpoint(String endpoint) {
this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Gets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @return the key
*/
String getKey() {
return keyOrResourceToken;
}
/**
* Sets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @param key master or readonly key
* @return current Builder.
*/
public CosmosClientBuilder key(String key) {
this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
this.cosmosAuthorizationTokenResolver = null;
this.credential = null;
this.permissions = null;
this.tokenCredential = null;
return this;
}
/**
* Gets a resource token used to perform authentication
* for accessing resource.
*
* @return the resourceToken
*/
String getResourceToken() {
return keyOrResourceToken;
}
/**
* Sets a resource token used to perform authentication
* for accessing resource.
*
* @param resourceToken resourceToken for authentication
* @return current Builder.
*/
public CosmosClientBuilder resourceToken(String resourceToken) {
this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
this.cosmosAuthorizationTokenResolver = null;
this.credential = null;
this.permissions = null;
this.tokenCredential = null;
return this;
}
/**
* Gets a token credential instance used to perform authentication
* for accessing resource.
*
* @return the token credential.
*/
TokenCredential getTokenCredential() {
return tokenCredential;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential {@link TokenCredential} used to authorize requests sent to the service.
* @return the updated CosmosClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.keyOrResourceToken = null;
this.cosmosAuthorizationTokenResolver = null;
this.credential = null;
this.permissions = null;
return this;
}
/**
* Gets the permission list, which contains the
* resource tokens needed to access resources.
*
* @return the permission list
*/
List<CosmosPermissionProperties> getPermissions() {
return permissions;
}
/**
* Sets the permission list, which contains the
* resource tokens needed to access resources.
*
* @param permissions Permission list for authentication.
* @return current Builder.
*/
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
this.keyOrResourceToken = null;
this.cosmosAuthorizationTokenResolver = null;
this.credential = null;
this.tokenCredential = null;
return this;
}
/**
 * Gets the {@link ConsistencyLevel} to be used.
 * <br/>
 * By default, session consistency is used (presumably {@code ConsistencyLevel.SESSION} —
 * the original javadoc was truncated here; confirm against the published docs).
 * <br/>
 * @return the consistency level
 */
ConsistencyLevel getConsistencyLevel() {
return this.desiredConsistencyLevel;
}
/**
 * Sets the {@link ConsistencyLevel} to be used.
 * <br/>
 * By default, session consistency is used.
 *
 * @param desiredConsistencyLevel {@link ConsistencyLevel}
 * @return current Builder
 */
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
return this;
}
/**
* Gets the (@link ConnectionPolicy) to be used
*
* @return the connection policy
*/
ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
/**
* Gets the {@link AzureKeyCredential} to be used
*
* @return {@link AzureKeyCredential}
*/
AzureKeyCredential getCredential() {
return credential;
}
/**
* Gets the {@link CosmosContainerProactiveInitConfig} to be used
*
* @return {@link CosmosContainerProactiveInitConfig}
* */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
return proactiveContainerInitConfig;
}
/**
* Sets the {@link AzureKeyCredential} to be used
*
* @param credential {@link AzureKeyCredential}
* @return current cosmosClientBuilder
*/
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
this.keyOrResourceToken = null;
this.cosmosAuthorizationTokenResolver = null;
this.permissions = null;
this.tokenCredential = null;
return this;
}
/**
* Gets the boolean which indicates whether to only return the headers and status code in Cosmos DB response
* in case of Create, Update and Delete operations on CosmosItem.
* <br/>
* If set to false (which is by default), service doesn't return payload in the response. It reduces networking
* and CPU load by not sending the payload back over the network and serializing it
* on the client.
* <br/>
* By-default, this is false.
*
* @return a boolean indicating whether payload will be included in the response or not
*/
boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
/**
* Sets the boolean to only return the headers and status code in Cosmos DB response
* in case of Create, Update and Delete operations on CosmosItem.
* <br/>
* If set to false (which is by default), service doesn't return payload in the response. It reduces networking
* and CPU load by not sending the payload back over the network and serializing it on the client.
* <br/>
* This feature does not impact RU usage for read or write operations.
* <br/>
* By-default, this is false.
*
* @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
return this;
}
/**
* Sets the default GATEWAY connection configuration to be used.
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder gatewayMode() {
this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
return this;
}
/**
* Sets the GATEWAY connection configuration to be used.
*
* @param gatewayConnectionConfig gateway connection configuration
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
this.gatewayConnectionConfig = gatewayConnectionConfig;
return this;
}
/**
* Sets the default DIRECT connection configuration to be used.
* <br/>
* By default, the builder is initialized with directMode()
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode() {
this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
return this;
}
/**
* Sets the DIRECT connection configuration to be used.
* <br/>
* By default, the builder is initialized with directMode()
*
* @param directConnectionConfig direct connection configuration
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
this.directConnectionConfig = directConnectionConfig;
return this;
}
/**
* Sets the DIRECT connection configuration to be used.
* gatewayConnectionConfig - represents basic configuration to be used for gateway client.
* <br/>
* Even in direct connection mode, some of the meta data operations go through gateway client,
* <br/>
* Setting gateway connection config in this API doesn't affect the connection mode,
* which will be Direct in this case.
*
* @param directConnectionConfig direct connection configuration to be used
* @param gatewayConnectionConfig gateway connection configuration to be used
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
this.directConnectionConfig = directConnectionConfig;
this.gatewayConnectionConfig = gatewayConnectionConfig;
return this;
}
/**
* sets the value of the user-agent suffix.
*
* @param userAgentSuffix The value to be appended to the user-agent header, this is
* used for monitoring purposes.
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
this.userAgentSuffix = userAgentSuffix;
return this;
}
/**
* Sets the retry policy options associated with the DocumentClient instance.
* <p>
* Properties in the RetryOptions class allow application to customize the built-in
* retry policies. This property is optional. When it's not set, the SDK uses the
* default values for configuring the retry policies. See RetryOptions class for
* more details.
*
* @param throttlingRetryOptions the RetryOptions instance.
* @return current CosmosClientBuilder
* @throws IllegalArgumentException thrown if an error occurs
*/
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
this.throttlingRetryOptions = throttlingRetryOptions;
return this;
}
/**
* Sets the preferred regions for geo-replicated database accounts. For example,
* "East US" as the preferred region.
* <p>
* When EnableEndpointDiscovery is true and PreferredRegions is non-empty,
* the SDK will prefer to use the regions in the container in the order
* they are specified to perform operations.
* <p>
* If EnableEndpointDiscovery is set to false, this property is ignored.
*
* @param preferredRegions the list of preferred regions.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
this.preferredRegions = preferredRegions;
return this;
}
/**
* Sets the flag to enable endpoint discovery for geo-replicated database accounts.
* <p>
* When EnableEndpointDiscovery is true, the SDK will automatically discover the
* current write and read regions to ensure requests are sent to the correct region
* based on the capability of the region and the user's preference.
* <p>
* The default value for this property is true indicating endpoint discovery is enabled.
*
* @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
return this;
}
/**
* Sets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
* Cosmos DB service.
* <p>
* When the value of this property is true, the SDK will direct write operations to
* available writable regions of geo-replicated database account. Writable regions
* are ordered by PreferredRegions property. Setting the property value
* to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
* is also set to true.
* <p>
* DEFAULT value is true indicating that writes are directed to
* available writable regions of geo-replicated database account.
*
* @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
* database accounts.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
return this;
}
/**
* Sets the flag to enable client telemetry which will periodically collect
* database operations aggregation statistics, system information like cpu/memory
* and send it to cosmos monitoring service, which will be helpful during debugging.
*<p>
* DEFAULT value is false indicating this is opt in feature, by default no telemetry collection.
*
* @param clientTelemetryEnabled flag to enable client telemetry.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
return this;
}
/**
 * Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
 * <p>
 * DEFAULT value is true.
 * <p>
 * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
 * The default is false for Bounded Staleness.
 * This property only takes effect when both of the following are true:
 * 1. endpoint discovery is enabled ({@link #endpointDiscoveryEnabled(boolean)}) — NOTE(review):
 *    link target reconstructed from a truncated javadoc reference, confirm against upstream source
 * 2. the Azure Cosmos DB account has more than one region
 *
 * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
 * Azure Cosmos DB service.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
    this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
    return this;
}
/**
 * Enables automatic retries for write operations even when the SDK can't
 * guarantee that they are idempotent. This is the default behavior for the entire Cosmos client - the policy can be
 * overridden for individual operations in the request options.
 * <br/>
 * NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
 * Upsert and Delete operations. It can be overridden on per-request base in the request options. For patch
 * operations by default (unless overridden in the request options) retries are always disabled by default.
 * <br/>
 * - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
 * to create a document that the initial attempt successfully created. When enabling
 * useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
 * <br/>
 * - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when a
 * replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
 * initial attempt successfully updated (causing the etag to change). When enabling
 * useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
 * <br/>
 * - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
 * initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
 * handle 404 - Not Found.
 * <br/>
 * - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
 * document has been created by the initial attempt - so logically within the same operation. This will only
 * impact applications who have special casing for 201 vs. 200 for upsert operations.
 * <br/>
 * Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
 * being executed and the precondition filters being used. Before enabling write retries for patch this needs
 * to be carefully reviewed and tested - which is why retries for patch can only be enabled on request options
 * - any CosmosClient wide configuration will be ignored.
 * <br/>
 * Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
 * @param options the options controlling whether non-idempotent write operations should be retried and whether
 * trackingIds can be used.
 * @return the CosmosItemRequestOptions
 */
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
    checkNotNull(options, "Argument 'options' must not be null.");
    // Map the three-way option state onto the internal policy enum.
    if (!options.isEnabled()) {
        this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
    } else if (options.isTrackingIdUsed()) {
        this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
    } else {
        this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
    }
    return this;
}
// Returns the effective non-idempotent write retry policy.
WriteRetryPolicy getNonIdempotentWriteRetryPolicy() {
    return this.writeRetryPolicy;
}

// Re-derives the write retry policy from system configuration. Any missing or
// unrecognized value (including the explicit "NO_RETRIES") yields DISABLED.
void resetNonIdempotentWriteRetryPolicy() {
    WriteRetryPolicy resolvedPolicy = WriteRetryPolicy.DISABLED;
    String writePolicyName = Configs.getNonIdempotentWriteRetryPolicy();
    if (writePolicyName != null) {
        if ("WITH_TRACKING_ID".equalsIgnoreCase(writePolicyName)) {
            resolvedPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
        } else if ("WITH_RETRIES".equalsIgnoreCase(writePolicyName)) {
            resolvedPolicy = WriteRetryPolicy.WITH_RETRIES;
        }
    }
    this.writeRetryPolicy = resolvedPolicy;
}
// Re-derives the session capturing mode from system configuration. Only the value
// "REGION_SCOPED" (case-insensitive) enables region-scoped capturing; any other
// non-empty value disables it; an empty/missing value leaves the flag untouched.
void resetSessionCapturingType() {
    String sessionCapturingType = Configs.getSessionCapturingType();
    if (StringUtils.isEmpty(sessionCapturingType)) {
        return;
    }
    boolean regionScoped = "REGION_SCOPED".equalsIgnoreCase(sessionCapturingType);
    if (regionScoped) {
        logger.info("Session capturing type is set to REGION_SCOPED");
    } else {
        logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
    }
    this.isRegionScopedSessionCapturingEnabled = regionScoped;
}
/**
 * Sets the {@link CosmosContainerProactiveInitConfig} which enables warming up of caches and connections
 * associated with containers obtained from {@link CosmosContainerProactiveInitConfig#getCosmosContainerIdentities()}
 * for the first <em>k</em> preferred regions where <em>k</em> evaluates to
 * {@link CosmosContainerProactiveInitConfig#getProactiveConnectionRegionsCount()}.
 * (NOTE(review): method-link targets reconstructed from truncated javadoc references — confirm against upstream source.)
 *
 * <p>
 * Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate {@link CosmosContainerProactiveInitConfig} class
 * </p>
 * @param proactiveContainerInitConfig which encapsulates a list of container identities and no of
 * proactive connection regions
 * @return current CosmosClientBuilder
 * */
public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    // The actual warm-up runs later, during buildClient()/buildAsyncClient().
    this.proactiveContainerInitConfig = proactiveContainerInitConfig;
    return this;
}
/**
 * Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client.
 *
 * @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig) {
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    return this;
}
/**
 * Sets the {@link SessionRetryOptions} instance on the client.
 * <p>
 * This setting helps in optimizing retry behavior associated with
 * {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} or {@code 404 / 1002} scenarios which happen
 * when the targeted consistency used by the request is <i>Session Consistency</i> and a
 * request goes to a region that does not have recent enough data which the
 * request is looking for.
 * <p>
 * DISCLAIMER: Setting {@link SessionRetryOptions} will modify retry behavior
 * for all operations or workloads executed through this instance of the client.
 * <p>
 * For multi-write accounts, retrying on a different write region can improve availability
 * for reads (whether they hit a read or a write region) and for writes, since another
 * write region may have more up-to-date data.
 * <p>
 * For single-write accounts, the setting mainly helps reads switch to the write region
 * quicker; for reads already on the write region, and for writes, the write region holds
 * the most up-to-date data so {@code READ_SESSION_NOT_AVAILABLE} retries do not apply.
 * <p>
 * About region switch hints:
 * <ul>
 *     <li>In order to prioritize the local region for retries, use the hint
 *     {@link CosmosRegionSwitchHint#LOCAL_REGION_PREFERRED}.</li>
 *     <li>In order to move retries to a different / remote region quicker, use the hint
 *     {@link CosmosRegionSwitchHint#REMOTE_REGION_PREFERRED}.</li>
 * </ul>
 * (NOTE(review): hint constant names reconstructed from truncated javadoc references — confirm against upstream source.)
 * <p>
 * Operations supported: Read, Query, Create, Replace, Upsert, Delete, Patch, Batch and Bulk.
 *
 * @param sessionRetryOptions The {@link SessionRetryOptions} instance.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
    this.sessionRetryOptions = sessionRetryOptions;
    return this;
}
/**
 * Sets a {@link Supplier} which returns a {@link CosmosExcludedRegions} instance when
 * {@code Supplier#get()} is invoked. Requests will not be routed to regions contained in the
 * supplied {@code CosmosExcludedRegions} for hedging scenarios and retry scenarios for the
 * workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
 * @return current CosmosClientBuilder.
 * */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
    this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
    return this;
}
/**
 * Gets the regions to exclude from the list of preferred regions. A request will not be
 * routed to these excluded regions for non-retry and retry scenarios
 * for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @return the set of excluded regions; an empty (mutable) set when no supplier is configured
 *         or the supplier yields {@code null}.
 * */
Set<String> getExcludedRegions() {
    Supplier<CosmosExcludedRegions> supplierSnapshot = this.cosmosExcludedRegionsSupplier;
    if (supplierSnapshot != null) {
        // Invoke the (user-provided) supplier exactly once. The original code called
        // get() twice - once for the null check and once for the value - which is
        // wasteful for expensive suppliers and racy for suppliers whose result can
        // change (or become null) between calls.
        CosmosExcludedRegions excludedRegions = supplierSnapshot.get();
        if (excludedRegions != null) {
            return excludedRegions.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
// Package-private accessors used by the client build pipeline.

SessionRetryOptions getSessionRetryOptions() {
    return this.sessionRetryOptions;
}

/**
 * Gets the {@link CosmosEndToEndOperationLatencyPolicyConfig}.
 * @return the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 */
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}

/**
 * Gets the GATEWAY connection configuration to be used.
 * @return gateway connection config
 */
GatewayConnectionConfig getGatewayConnectionConfig() {
    return this.gatewayConnectionConfig;
}

/**
 * Gets the DIRECT connection configuration to be used.
 * @return direct connection config
 */
DirectConnectionConfig getDirectConnectionConfig() {
    return this.directConnectionConfig;
}

/**
 * Gets the value of user-agent suffix.
 * @return the value of user-agent suffix.
 */
String getUserAgentSuffix() {
    return this.userAgentSuffix;
}

/**
 * Gets the retry policy options associated with the DocumentClient instance.
 * @return the RetryOptions instance.
 */
ThrottlingRetryOptions getThrottlingRetryOptions() {
    return this.throttlingRetryOptions;
}

/**
 * Gets the preferred regions for geo-replicated database accounts.
 * @return the list of preferred regions; never {@code null}.
 */
List<String> getPreferredRegions() {
    return this.preferredRegions == null ? Collections.emptyList() : this.preferredRegions;
}
/**
 * Gets the flag to enable endpoint discovery for geo-replicated database accounts.
 * @return whether endpoint discovery is enabled.
 */
boolean isEndpointDiscoveryEnabled() {
    return this.endpointDiscoveryEnabled;
}

/**
 * Gets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
 * Cosmos DB service.
 * <p>
 * When true, the SDK directs write operations to available writable regions of a
 * geo-replicated database account (ordered by the PreferredRegions property). It only has
 * effect once EnableMultipleWriteRegions in DatabaseAccount is also set to true.
 * <p>
 * DEFAULT value is true.
 *
 * @return flag to enable writes on any regions for geo-replicated database accounts.
 */
boolean isMultipleWriteRegionsEnabled() {
    return this.multipleWriteRegionsEnabled;
}
/**
 * Gets the effective client telemetry flag.
 * <p>
 * Precedence: the builder-level override set via {@code clientTelemetryEnabled(boolean)}
 * wins; otherwise an explicit value on the {@link CosmosClientTelemetryConfig}; otherwise
 * the SDK default.
 *
 * @return flag to enable client telemetry.
 */
boolean isClientTelemetryEnabled() {
    // Check the override first so the bridge-helper accessor is only invoked
    // when its result can actually matter (the original computed it eagerly).
    if (this.clientTelemetryEnabledOverride != null) {
        return this.clientTelemetryEnabledOverride;
    }
    Boolean explicitlySetInConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
    if (explicitlySetInConfig != null) {
        return explicitlySetInConfig;
    }
    return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
/**
 * Gets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
 * <p>
 * DEFAULT value is true.
 * <p>
 * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
 * The default is false for Bounded Staleness.
 * This property only takes effect when both of the following are true:
 * 1. endpoint discovery is enabled — NOTE(review): reconstructed from a truncated javadoc
 *    reference, confirm against upstream source
 * 2. the Azure Cosmos DB account has more than one region
 *
 * @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
 */
boolean isReadRequestsFallbackEnabled() {
    return this.readRequestsFallbackEnabled;
}

/**
 * Returns the client telemetry config instance for this builder.
 * @return the client telemetry config instance for this builder
 */
CosmosClientTelemetryConfig getClientTelemetryConfig() {
    return this.clientTelemetryConfig;
}
/**
 * Sets the client telemetry config instance to be used by this builder.
 * (Original javadoc said "Returns ..." which was inaccurate for a setter.)
 *
 * @param telemetryConfig the client telemetry configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
    ifThrowIllegalArgException(telemetryConfig == null,
        "Parameter 'telemetryConfig' must not be null.");
    // When the new config explicitly states whether telemetry should be sent to
    // the service, it supersedes any previously set builder-level override.
    Boolean explicitValueFromConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(telemetryConfig);
    if (explicitValueFromConfig != null) {
        this.clientTelemetryEnabledOverride = null;
    }
    this.clientTelemetryConfig = telemetryConfig;
    return this;
}
/**
 * Sets a custom serializer that should be used for conversion between POJOs and Json payload stored in the
 * Cosmos DB service. The custom serializer can also be specified in request options. If defined here and
 * in request options the serializer defined in request options will be used.
 *
 * @param customItemSerializer the custom serializer to be used for item payload transformations
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
    this.defaultCustomSerializer = customItemSerializer;
    return this;
}

// Package-private accessor for the custom item serializer (may be null).
CosmosItemSerializer getCustomItemSerializer() {
    return this.defaultCustomSerializer;
}
/**
 * Builds a cosmos async client with the provided properties.
 *
 * @return CosmosAsyncClient
 */
public CosmosAsyncClient buildAsyncClient() {
    // Public entry point always logs startup info.
    return buildAsyncClient(true);
}
/**
 * Builds a cosmos async client with the provided properties.
 *
 * @param logStartupInfo whether the startup configuration summary should be logged
 * @return CosmosAsyncClient
 */
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig == null) {
        // No proactive warm-up requested - record completion with an empty list.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    } else {
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig.getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration == null) {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
/**
 * Builds a cosmos sync client with the provided properties.
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // Consistency fix: buildAsyncClient(boolean) records completion with an empty
        // list when no proactive init is configured; the sync path previously skipped
        // this, leaving the open-connections tracking state never marked complete.
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
// Rebuilds the ConnectionPolicy from the configured connection configs (direct mode
// takes precedence when both are present), then overlays all builder-level settings.
ConnectionPolicy buildConnectionPolicy() {
    if (this.directConnectionConfig != null) {
        // Direct mode still requires a gateway config for metadata operations.
        if (this.gatewayConnectionConfig == null) {
            this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        }
        this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
    } else if (gatewayConnectionConfig != null) {
        this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
    }
    // Neither branch taken: keep the existing policy instance and just refresh settings.
    this.connectionPolicy.setPreferredRegions(this.preferredRegions);
    this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
    this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
    this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
    this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
    this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
    this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    return this.connectionPolicy;
}
// Validates the builder configuration before a client is constructed.
// Throws IllegalArgumentException for every invalid/missing setting.
private void validateConfig() {
    // Check the endpoint for null BEFORE parsing it. The original parsed first,
    // so a missing endpoint surfaced as a NullPointerException from the URI
    // constructor instead of the intended IllegalArgumentException below.
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Each preferred region must be non-blank and resolvable against the endpoint.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                // Normalize to the canonical region form: lower-case, no spaces.
                String normalizedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, normalizedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // At least one credential mechanism must be configured.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
// Package-private accessor for the Configs instance.
Configs configs() {
    return this.configs;
}

/**
 * Sets the Configs instance (package-private, used for testing/bridging).
 *
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder configs(Configs configs) {
    this.configs = configs;
    return this;
}

// Throws IllegalArgumentException with the given message when the condition holds.
private void ifThrowIllegalArgException(boolean value, String error) {
    if (value) {
        throw new IllegalArgumentException(error);
    }
}
// Logs a one-time summary of the effective client configuration and startup duration.
// Emitted at WARN intentionally so it is visible under typical production log levels.
private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
stopwatch.stop();
if (logger.isWarnEnabled()) {
long time = stopwatch.getTime();
String diagnosticsCfg = "";
String tracingCfg = "";
// Telemetry config may legitimately be absent; fall back to empty strings.
if (client.getClientTelemetryConfig() != null) {
diagnosticsCfg = client.getClientTelemetryConfig().toString();
}
DiagnosticsProvider provider = client.getDiagnosticsProvider();
if (provider != null) {
tracingCfg = provider.getTraceConfigLog();
}
// NOTE: placeholder count below must stay in sync with the argument list.
logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
"configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
"consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
"connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
"diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
}
}
// Registers the accessor that lets internal implementation classes (via
// ImplementationBridgeHelpers/CosmosClientBuilderHelper) reach package-private
// builder state without widening the public API surface. Each override below is
// a straight delegation to the corresponding package-private member.
static void initialize() {
CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
@Override
public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
CosmosClientMetadataCachesSnapshot metadataCache) {
builder.metadataCaches(metadataCache);
}
@Override
public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
return builder.metadataCaches();
}
@Override
public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
builder.setApiType(apiType);
}
@Override
public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
return builder.apiType();
}
@Override
public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
return builder.getConnectionPolicy();
}
@Override
public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
return builder.buildConnectionPolicy();
}
@Override
public Configs getConfigs(CosmosClientBuilder builder) {
return builder.configs();
}
@Override
public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
return builder.getConsistencyLevel();
}
@Override
public String getEndpoint(CosmosClientBuilder builder) {
return builder.getEndpoint();
}
@Override
public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
return builder.getCustomItemSerializer();
}
@Override
public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
}
@Override
public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
return builder.isRegionScopedSessionCapturingEnabled();
}
});
}
// Ensure the accessor is registered as soon as the class is loaded.
static { initialize(); }
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
private Configs configs = new Configs();
// --- Endpoint and credentials (mutually exclusive setters null each other out) ---
private String serviceEndpoint;
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
// --- Connectivity configuration ---
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
// --- Regional routing defaults ---
private List<String> preferredRegions;
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
// --- Telemetry; clientTelemetryEnabledOverride is boxed so null means "not set" ---
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
private Boolean clientTelemetryEnabledOverride = null;
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
 * Instantiates a new Cosmos client builder with direct-mode defaults.
 */
public CosmosClientBuilder() {
// Direct connectivity is the default transport mode.
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.userAgentSuffix = "";
this.throttlingRetryOptions = new ThrottlingRetryOptions();
this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
// Seed the write retry policy from system configuration.
this.resetNonIdempotentWriteRetryPolicy();
this.requestPolicies = new LinkedList<>();
}
// Stores a warm metadata-caches snapshot that the client can start from.
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
    this.state = metadataCachesSnapshot;
    return this;
}

// Returns the previously stored metadata-caches snapshot (may be null).
CosmosClientMetadataCachesSnapshot metadataCaches() {
    return this.state;
}
/**
 * Sets a {@code boolean} flag to reduce the frequency of cross-region retries when the client
 * strives to meet Session Consistency guarantees for operations that can be scoped to a single
 * logical partition. Read-your-writes for a given logical partition then sees higher stickiness
 * to regions which previously served writes or requests for that partition, reducing
 * unnecessary cross-region retries, CPU utilization spikes and latency.
 *
 * <p>
 * DISCLAIMER: Setting this flag will impact all operations executed through this instance of
 * the client provided that both the operation and the account support multi-region writes.
 * When enabling it, ensure to maintain a singleton instance of {@link CosmosClient} or
 * {@link CosmosAsyncClient}.
 * </p>
 *
 * Operations supported:
 * <ul>
 *     <li>Read</li>
 *     <li>Create</li>
 *     <li>Upsert</li>
 *     <li>Delete</li>
 *     <li>Replace</li>
 *     <li>Batch</li>
 *     <li>Patch</li>
 *     <li>Query when scoped to a single logical partition by specifying {@code PartitionKey} with {@link com.azure.cosmos.models.CosmosQueryRequestOptions}</li>
 *     <li>Change feed when scoped to a single logical partition by using {@code FeedRange.forLogicalPartition()} with {@link com.azure.cosmos.models.CosmosChangeFeedRequestOptions}</li>
 * </ul>
 *
 * <p>
 * NOTE: Bulk operations are not supported.
 * </p>
 *
 * @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
 * @return current {@link CosmosClientBuilder}
 * */
CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
    this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
    return this;
}
/**
 * Gets the {@code boolean} flag set through {@code regionScopedSessionCapturingEnabled(boolean)}.
 *
 * @return isRegionScopedSessionCapturingEnabled A {@code boolean} flag
 * */
boolean isRegionScopedSessionCapturingEnabled() {
    return this.isRegionScopedSessionCapturingEnabled;
}

/**
 * Sets an apiType for the builder.
 * @param apiType the API surface this client is built for
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder setApiType(ApiType apiType) {
    this.apiType = apiType;
    return this;
}
/**
 * Adds a policy for modifying request options dynamically. The last policy defined aimed towards
 * the same operation type will be the one ultimately applied.
 *
 * @param policy the policy to add
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
    checkNotNull(policy, "Argument 'policy' must not be null.");
    // Policies accumulate; later registrations for the same operation type win.
    this.requestPolicies.add(policy);
    return this;
}
/**
 * Returns apiType for the Builder.
 * @return the configured {@code ApiType} (may be null)
 */
ApiType apiType() {
    return this.apiType;
}

/**
 * Session capturing is enabled by default for {@link ConsistencyLevel#SESSION}
 * (NOTE(review): link target reconstructed from a truncated javadoc reference).
 * For other consistency levels, it is not needed, unless if you need occasionally send requests with Session
 * Consistency while the client is not configured in session.
 * <p>
 * Enabling Session capturing for Session mode has no effect.
 *
 * @param sessionCapturingOverrideEnabled session capturing override
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
    this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
    return this;
}

/**
 * Indicates if Session capturing is enabled for non Session modes.
 * The default is false.
 *
 * @return the session capturing override
 */
boolean isSessionCapturingOverrideEnabled() {
    return this.sessionCapturingOverrideEnabled;
}
/**
 * Enables connections sharing across multiple Cosmos Clients. The default is false.
 * <br/>
 * <pre>
 * {@code
 * CosmosAsyncClient client1 = new CosmosClientBuilder()
 *         .endpoint(serviceEndpoint1)
 *         .key(key1)
 *         .consistencyLevel(ConsistencyLevel.SESSION)
 *         .connectionSharingAcrossClientsEnabled(true)
 *         .buildAsyncClient();
 *
 * CosmosAsyncClient client2 = new CosmosClientBuilder()
 *         .endpoint(serviceEndpoint2)
 *         .key(key2)
 *         .consistencyLevel(ConsistencyLevel.SESSION)
 *         .connectionSharingAcrossClientsEnabled(true)
 *         .buildAsyncClient();
 * }
 * </pre>
 * <br/>
 * When you have multiple instances of Cosmos Client in the same JVM interacting with multiple
 * Cosmos accounts, enabling this allows connection sharing in Direct mode if possible between
 * instances of Cosmos Client.
 * <br/>
 * Please note, when setting this option, the connection configuration (e.g., socket timeout
 * config, idle timeout config) of the first instantiated client will be used for all other
 * client instances.
 *
 * @param connectionSharingAcrossClientsEnabled connection sharing
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
    this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
    return this;
}
/**
 * Indicates whether connection sharing is enabled. The default is false.
 * <br/>
 * When you have multiple instances of Cosmos Client in the same JVM interacting with multiple
 * Cosmos accounts, enabling this allows connection sharing in Direct mode if possible between
 * instances of Cosmos Client.
 *
 * @return the connection sharing across multiple clients
 */
boolean isConnectionSharingAcrossClientsEnabled() {
    return this.connectionSharingAcrossClientsEnabled;
}

/**
 * Gets the token resolver.
 *
 * @return the token resolver (may be null)
 */
CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
    return this.cosmosAuthorizationTokenResolver;
}
/**
 * Sets the token resolver. A token resolver is mutually exclusive with every other
 * credential type, so the remaining credential fields are cleared.
 *
 * @param cosmosAuthorizationTokenResolver the token resolver
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder authorizationTokenResolver(
    CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
    this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
        "'cosmosAuthorizationTokenResolver' cannot be null.");
    // Clear all competing credential mechanisms.
    this.keyOrResourceToken = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/**
 * Gets the Azure Cosmos DB endpoint the SDK will connect to.
 *
 * @return the endpoint
 */
String getEndpoint() {
    return this.serviceEndpoint;
}
/**
 * Sets the Azure Cosmos DB endpoint the SDK will connect to.
 *
 * @param endpoint the service endpoint
 * @return current Builder
 */
@Override
public CosmosClientBuilder endpoint(String endpoint) {
    // Endpoint is mandatory; reject null eagerly rather than at build time.
    this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    return this;
}
/**
 * Gets either a master or readonly key used to perform authentication
 * for accessing resource.
 *
 * @return the key
 */
String getKey() {
    // Key and resource token share a single storage field.
    return this.keyOrResourceToken;
}
/**
 * Sets either a master or readonly key used to perform authentication
 * for accessing resource.
 *
 * @param key master or readonly key
 * @return current Builder.
 */
public CosmosClientBuilder key(String key) {
    // Selecting a key clears every other credential mechanism.
    this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
    this.tokenCredential = null;
    this.permissions = null;
    this.credential = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Gets a resource token used to perform authentication
 * for accessing resource.
 *
 * @return the resourceToken
 */
String getResourceToken() {
    // Resource token shares a single storage field with the account key.
    return this.keyOrResourceToken;
}
/**
 * Sets a resource token used to perform authentication
 * for accessing resource.
 *
 * @param resourceToken resourceToken for authentication
 * @return current Builder.
 */
public CosmosClientBuilder resourceToken(String resourceToken) {
    // Selecting a resource token clears every other credential mechanism.
    this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
    this.tokenCredential = null;
    this.permissions = null;
    this.credential = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Gets a token credential instance used to perform authentication
 * for accessing resource.
 *
 * @return the token credential.
 */
TokenCredential getTokenCredential() {
    return this.tokenCredential;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * <a href="https://aka.ms/azsdk/java/identity/docs">identity and authentication</a>
 * documentation for more details on proper usage of the {@link TokenCredential} type.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated CosmosClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
    // Selecting a TokenCredential clears every other credential mechanism.
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.permissions = null;
    this.credential = null;
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Gets the permission list, which contains the
 * resource tokens needed to access resources.
 *
 * @return the permission list
 */
List<CosmosPermissionProperties> getPermissions() {
    return this.permissions;
}
/**
 * Sets the permission list, which contains the
 * resource tokens needed to access resources.
 *
 * @param permissions Permission list for authentication.
 * @return current Builder.
 */
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
    // Selecting permissions clears every other credential mechanism.
    this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
    this.tokenCredential = null;
    this.credential = null;
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Gets the {@link ConsistencyLevel} to be used.
 * <br/>
 * By default, {@link ConsistencyLevel#SESSION} is used.
 * <br/>
 * @return the consistency level
 */
ConsistencyLevel getConsistencyLevel() {
    return desiredConsistencyLevel;
}
/**
 * Sets the {@link ConsistencyLevel} to be used.
 * <br/>
 * By default, {@link ConsistencyLevel#SESSION} is used.
 *
 * @param desiredConsistencyLevel {@link ConsistencyLevel}
 * @return current Builder
 */
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
    // Intentionally accepts null: null means "use the account/SDK default".
    this.desiredConsistencyLevel = desiredConsistencyLevel;
    return this;
}
/**
 * Gets the {@link ConnectionPolicy} to be used.
 *
 * @return the connection policy
 */
ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}

/**
 * Gets the {@link AzureKeyCredential} to be used.
 *
 * @return {@link AzureKeyCredential}
 */
AzureKeyCredential getCredential() {
    return this.credential;
}

/**
 * Gets the {@link CosmosContainerProactiveInitConfig} to be used.
 *
 * @return {@link CosmosContainerProactiveInitConfig}
 */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
    return this.proactiveContainerInitConfig;
}
/**
 * Sets the {@link AzureKeyCredential} to be used.
 *
 * @param credential {@link AzureKeyCredential}
 * @return current cosmosClientBuilder
 */
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
    // Selecting an AzureKeyCredential clears every other credential mechanism.
    this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
    this.tokenCredential = null;
    this.permissions = null;
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Gets the boolean which indicates whether to only return the headers and status code in the
 * Cosmos DB response for Create, Update and Delete operations on CosmosItem.
 * <br/>
 * If set to false (the default), the service doesn't return a payload in the response,
 * reducing networking and CPU load by not sending the payload back over the network and
 * serializing it on the client.
 *
 * @return a boolean indicating whether payload will be included in the response or not
 */
boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/**
 * Sets the boolean to only return the headers and status code in the Cosmos DB response
 * for Create, Update and Delete operations on CosmosItem.
 * <br/>
 * If set to false (the default), the service doesn't return a payload in the response,
 * reducing networking and CPU load by not sending the payload back over the network and
 * serializing it on the client.
 * <br/>
 * This feature does not impact RU usage for read or write operations.
 *
 * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
    this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
    return this;
}
/**
 * Sets the default GATEWAY connection configuration to be used.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode() {
    // Delegates to the parameterized overload with the SDK default config.
    return gatewayMode(GatewayConnectionConfig.getDefaultConfig());
}

/**
 * Sets the GATEWAY connection configuration to be used.
 *
 * @param gatewayConnectionConfig gateway connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
 * Sets the default DIRECT connection configuration to be used.
 * <br/>
 * By default, the builder is initialized with directMode().
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode() {
    // Delegates to the parameterized overload with the SDK default config.
    return directMode(DirectConnectionConfig.getDefaultConfig());
}

/**
 * Sets the DIRECT connection configuration to be used.
 * <br/>
 * By default, the builder is initialized with directMode().
 *
 * @param directConnectionConfig direct connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
    this.directConnectionConfig = directConnectionConfig;
    return this;
}
/**
 * Sets the DIRECT connection configuration, together with the gateway connection
 * configuration used by the gateway client.
 * <br/>
 * Even in direct connection mode, some of the metadata operations go through the
 * gateway client. Setting a gateway connection config here does not change the
 * connection mode, which remains Direct.
 *
 * @param directConnectionConfig direct connection configuration to be used
 * @param gatewayConnectionConfig gateway connection configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    this.directConnectionConfig = directConnectionConfig;
    return this;
}
/**
 * Sets the value of the user-agent suffix.
 *
 * @param userAgentSuffix The value to be appended to the user-agent header, this is
 * used for monitoring purposes.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
    this.userAgentSuffix = userAgentSuffix;
    return this;
}
/**
 * Sets the retry policy options associated with the DocumentClient instance.
 * <p>
 * Properties in the RetryOptions class allow the application to customize the built-in
 * retry policies. This property is optional; when it's not set, the SDK uses the
 * default values for configuring the retry policies. See the RetryOptions class for
 * more details.
 *
 * @param throttlingRetryOptions the RetryOptions instance.
 * @return current CosmosClientBuilder
 * @throws IllegalArgumentException thrown if an error occurs
 */
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
    this.throttlingRetryOptions = throttlingRetryOptions;
    return this;
}
/**
 * Sets the preferred regions for geo-replicated database accounts. For example,
 * "East US" as the preferred region.
 * <p>
 * When EnableEndpointDiscovery is true and PreferredRegions is non-empty,
 * the SDK will prefer to use the regions in the container in the order
 * they are specified to perform operations.
 * <p>
 * If EnableEndpointDiscovery is set to false, this property is ignored.
 *
 * @param preferredRegions the list of preferred regions.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
    // Stored as-is; validated against the service endpoint in validateConfig().
    this.preferredRegions = preferredRegions;
    return this;
}
/**
 * Sets the flag to enable endpoint discovery for geo-replicated database accounts.
 * <p>
 * When EnableEndpointDiscovery is true, the SDK will automatically discover the
 * current write and read regions to ensure requests are sent to the correct region
 * based on the capability of the region and the user's preference.
 * <p>
 * The default value for this property is true, indicating endpoint discovery is enabled.
 *
 * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
    this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
    return this;
}
/**
 * Sets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
 * Cosmos DB service.
 * <p>
 * When the value of this property is true, the SDK will direct write operations to
 * available writable regions of the geo-replicated database account. Writable regions
 * are ordered by the PreferredRegions property. Setting the property value
 * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
 * is also set to true.
 * <p>
 * DEFAULT value is true, indicating that writes are directed to
 * available writable regions of the geo-replicated database account.
 *
 * @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
 * database accounts.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
    this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
    return this;
}
/**
 * Sets the flag to enable client telemetry, which will periodically collect
 * database operations aggregation statistics and system information like cpu/memory
 * and send them to the Cosmos monitoring service, which is helpful during debugging.
 * <p>
 * DEFAULT value is false, indicating this is an opt-in feature; by default there is no telemetry collection.
 *
 * @param clientTelemetryEnabled flag to enable client telemetry.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
    // Stored as an override; it takes precedence over the telemetry config value
    // (see isClientTelemetryEnabled()).
    this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
    return this;
}
/**
 * Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
 * <p>
 * DEFAULT value is true.
 * <p>
 * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness;
 * the default is false for Bounded Staleness when:
 * 1. {@link #endpointDiscoveryEnabled(boolean)} is true
 * 2. the Azure Cosmos DB account has more than one region
 *
 * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
 * Azure Cosmos DB service.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
    this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
    return this;
}
/**
* Enables automatic retries for write operations even when the SDK can't
* guarantee that they are idempotent. This is the default behavior for the entire Cosmos client - the policy can be
* overridden for individual operations in the request options.
* <br/>
* NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
* Upsert and Delete operations. It can be overridden on per-request base in the request options. For patch
* operations by default (unless overridden in the request options) retries are always disabled by default.
* <br/>
* - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
* to create a document that the initial attempt successfully created. When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
* <br/>
* - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when a
* replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
* initial attempt successfully updated (causing the etag to change). When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
* <br/>
* - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
* initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
* handle 404 - Not Found.
* <br/>
* - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
* document has been created by the initial attempt - so logically within the same operation. This will only
* impact applications who have special casing for 201 vs. 200 for upsert operations.
* <br/>
* Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
* being executed and the precondition filters being used. Before enabling write retries for patch this needs
* to be carefully reviewed and tested - which is why retries for patch can only be enabled on request options
* - any CosmosClient wide configuration will be ignored.
* <br/>
* Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
* @param options the options controlling whether non-idempotent write operations should be retried and whether
* trackingIds can be used.
* @return the CosmosItemRequestOptions
*/
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
    checkNotNull(options, "Argument 'options' must not be null.");
    // Map the options object onto the internal tri-state write retry policy.
    if (!options.isEnabled()) {
        this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
    } else {
        this.writeRetryPolicy = options.isTrackingIdUsed()
            ? WriteRetryPolicy.WITH_TRACKING_ID
            : WriteRetryPolicy.WITH_RETRIES;
    }
    return this;
}
// Package-private read of the resolved write retry policy.
WriteRetryPolicy getNonIdempotentWriteRetryPolicy() {
    return writeRetryPolicy;
}
// Re-derives the write retry policy from the system configuration. Any value
// that is unset, "NO_RETRIES", or unrecognized resolves to DISABLED.
void resetNonIdempotentWriteRetryPolicy() {
    WriteRetryPolicy resolvedPolicy = WriteRetryPolicy.DISABLED;
    String writePolicyName = Configs.getNonIdempotentWriteRetryPolicy();
    if (writePolicyName != null) {
        if (writePolicyName.equalsIgnoreCase("WITH_TRACKING_ID")) {
            resolvedPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
        } else if (writePolicyName.equalsIgnoreCase("WITH_RETRIES")) {
            resolvedPolicy = WriteRetryPolicy.WITH_RETRIES;
        }
        // "NO_RETRIES" and unknown names keep the DISABLED default.
    }
    this.writeRetryPolicy = resolvedPolicy;
}
// Re-derives the session capturing mode from the system configuration.
// An unset/empty value leaves the current mode untouched; anything other than
// REGION_SCOPED is treated as unknown and disables region-scoped capturing.
void resetSessionCapturingType() {
    String sessionCapturingType = Configs.getSessionCapturingType();
    if (StringUtils.isEmpty(sessionCapturingType)) {
        return;
    }
    if (sessionCapturingType.equalsIgnoreCase("REGION_SCOPED")) {
        logger.info("Session capturing type is set to REGION_SCOPED");
        this.isRegionScopedSessionCapturingEnabled = true;
        return;
    }
    logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
    this.isRegionScopedSessionCapturingEnabled = false;
}
/**
* Sets the {@link CosmosContainerProactiveInitConfig} which enable warming up of caches and connections
* associated with containers obtained from {@link CosmosContainerProactiveInitConfig#getCosmosContainerIdentities()} to replicas
* obtained from the first <em>k</em> preferred regions where <em>k</em> evaluates to {@link CosmosContainerProactiveInitConfig#getProactiveConnectionRegionsCount()}.
*
* <p>
* Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate {@link CosmosContainerProactiveInitConfig} class
* </p>
* @param proactiveContainerInitConfig which encapsulates a list of container identities and no of
* proactive connection regions
* @return current CosmosClientBuilder
* */
public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    // Stored here; the warm-up itself runs during buildClient()/buildAsyncClient().
    this.proactiveContainerInitConfig = proactiveContainerInitConfig;
    return this;
}
/**
 * Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client.
 *
 * @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig) {
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    return this;
}
/**
* Sets the {@link SessionRetryOptions} instance on the client.
* <p>
* This setting helps in optimizing retry behavior associated with
* {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} or {@code 404 / 1002} scenarios which happen
* when the targeted consistency used by the request is <i>Session Consistency</i> and a
* request goes to a region that does not have recent enough data which the
* request is looking for.
* <p>
* DISCLAIMER: Setting {@link SessionRetryOptions} will modify retry behavior
* for all operations or workloads executed through this instance of the client.
* <p>
* For multi-write accounts:
* <ul>
* <li>
* For a read request going to a local read region, it is possible to optimize
* availability by having the request be retried on a different write region since
* the other write region might have more upto date data.
* </li>
* <li>
* For a read request going to a local write region, it could help to
* switch to a different write region right away provided the local write region
* does not have the most up to date data.
* </li>
* <li>
* For a write request going to a local write region, it could help to
* switch to a different write region right away provided the local write region
* does not have the most up to date data.
* </li>
* </ul>
* For single-write accounts:
* <ul>
* <li>
* If a read request goes to a local read region, it helps to switch to the write region quicker.
* </li>
* <li>
* If a read request goes to a write region, the {@link SessionRetryOptions} setting does not
* matter since the write region in a single-write account has the most up to date data.
* </li>
* <li>
* For a write to a write region in a single-write account, {@code READ_SESSION_NOT_AVAILABLE} errors
* do not apply since the write-region always has the most recent version of the data
* and all writes go to the primary replica in this region. Therefore, replication lags causing errors
* is not applicable here.
* </li>
* </ul>
* About region switch hints:
* <ul>
* <li>In order to prioritize the local region for retries, use the hint {@link CosmosRegionSwitchHint#LOCAL_REGION_PREFERRED}.</li>
* <li>In order to move retries to a different / remote region quicker, use the hint {@link CosmosRegionSwitchHint#REMOTE_REGION_PREFERRED}.</li>
* </ul>
* Operations supported:
* <ul>
* <li>Read</li>
* <li>Query</li>
* <li>Create</li>
* <li>Replace</li>
* <li>Upsert</li>
* <li>Delete</li>
* <li>Patch</li>
* <li>Batch</li>
* <li>Bulk</li>
* </ul>
*
* @param sessionRetryOptions The {@link SessionRetryOptions} instance.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
    // Applies client-wide; see the javadoc above for the full retry semantics.
    this.sessionRetryOptions = sessionRetryOptions;
    return this;
}
/**
 * Sets a {@link Supplier} which returns a {@link CosmosExcludedRegions} instance when
 * {@link Supplier#get()} is invoked. Requests will not be routed to regions present in
 * {@link CosmosExcludedRegions#getExcludedRegions()} for hedging scenarios and retry
 * scenarios for the workload executed through this instance
 * of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
 * @return current CosmosClientBuilder.
 * */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
    this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
    return this;
}
/**
* Gets the regions to exclude from the list of preferred regions. A request will not be
* routed to these excluded regions for non-retry and retry scenarios
* for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
*
* @return the list of regions to exclude.
* */
/**
 * Gets the regions to exclude from the list of preferred regions. A request will not be
 * routed to these excluded regions for non-retry and retry scenarios for the workload
 * executed through this instance of the client.
 *
 * @return the set of regions to exclude; never null (empty set when no supplier is set
 * or the supplier returns null).
 */
Set<String> getExcludedRegions() {
    if (this.cosmosExcludedRegionsSupplier != null) {
        // Invoke the supplier exactly once. The previous code called get() twice:
        // a supplier returning a fresh value per call could pass the null check
        // and then NPE - or return a different instance - on the second call.
        CosmosExcludedRegions excludedRegions = this.cosmosExcludedRegionsSupplier.get();
        if (excludedRegions != null) {
            return excludedRegions.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
// Package-private read of the session retry options.
SessionRetryOptions getSessionRetryOptions() {
    return sessionRetryOptions;
}

/**
 * Gets the {@link CosmosEndToEndOperationLatencyPolicyConfig}.
 *
 * @return the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 */
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
    return cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Gets the GATEWAY connection configuration to be used.
 *
 * @return gateway connection config
 */
GatewayConnectionConfig getGatewayConnectionConfig() {
    return this.gatewayConnectionConfig;
}

/**
 * Gets the DIRECT connection configuration to be used.
 *
 * @return direct connection config
 */
DirectConnectionConfig getDirectConnectionConfig() {
    return this.directConnectionConfig;
}

/**
 * Gets the value of the user-agent suffix.
 *
 * @return the value of user-agent suffix.
 */
String getUserAgentSuffix() {
    return this.userAgentSuffix;
}

/**
 * Gets the retry policy options associated with the DocumentClient instance.
 *
 * @return the RetryOptions instance.
 */
ThrottlingRetryOptions getThrottlingRetryOptions() {
    return this.throttlingRetryOptions;
}

/**
 * Gets the preferred regions for geo-replicated database accounts.
 *
 * @return the list of preferred regions; never null.
 */
List<String> getPreferredRegions() {
    // Callers always receive a non-null list.
    return preferredRegions == null ? Collections.emptyList() : preferredRegions;
}
/**
 * Gets the flag to enable endpoint discovery for geo-replicated database accounts.
 *
 * @return whether endpoint discovery is enabled.
 */
boolean isEndpointDiscoveryEnabled() {
    return this.endpointDiscoveryEnabled;
}
/**
 * Gets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
 * Cosmos DB service.
 * <p>
 * When the value of this property is true, the SDK will direct write operations to
 * available writable regions of the geo-replicated database account. Writable regions
 * are ordered by the PreferredRegions property. Setting the property value
 * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
 * is also set to true.
 * <p>
 * DEFAULT value is true, indicating that writes are directed to
 * available writable regions of the geo-replicated database account.
 *
 * @return flag to enable writes on any regions for geo-replicated database accounts.
 */
boolean isMultipleWriteRegionsEnabled() {
    return this.multipleWriteRegionsEnabled;
}
/**
 * Gets the flag to enable client telemetry.
 * <br/>
 * Precedence: the builder-level override set via {@code clientTelemetryEnabled(boolean)}
 * wins over the telemetry config value, which in turn wins over the SDK default.
 *
 * @return flag to enable client telemetry.
 */
boolean isClientTelemetryEnabled() {
    Boolean explicitlySetInConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
    if (this.clientTelemetryEnabledOverride != null) {
        return this.clientTelemetryEnabledOverride;
    }
    return explicitlySetInConfig != null
        ? explicitlySetInConfig
        : ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
/**
 * Gets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
 * <p>
 * DEFAULT value is true.
 * <p>
 * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness;
 * the default is false for Bounded Staleness when:
 * 1. {@link #endpointDiscoveryEnabled(boolean)} is true
 * 2. the Azure Cosmos DB account has more than one region
 *
 * @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
 */
boolean isReadRequestsFallbackEnabled() {
    return this.readRequestsFallbackEnabled;
}
/**
 * Returns the client telemetry config instance for this builder.
 *
 * @return the client telemetry config instance for this builder
 */
CosmosClientTelemetryConfig getClientTelemetryConfig() {
    return clientTelemetryConfig;
}
/**
 * Sets the client telemetry config instance for this builder.
 *
 * @param telemetryConfig the client telemetry configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
    ifThrowIllegalArgException(telemetryConfig == null,
        "Parameter 'telemetryConfig' must not be null.");
    // When the new config carries an explicit send-to-service flag, drop any
    // previously set builder-level override so the config value takes effect.
    Boolean explicitValueFromConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(telemetryConfig);
    if (explicitValueFromConfig != null) {
        this.clientTelemetryEnabledOverride = null;
    }
    this.clientTelemetryConfig = telemetryConfig;
    return this;
}
/**
 * Sets a custom serializer to be used for conversion between POJOs and the Json payload stored in the
 * Cosmos DB service. A custom serializer can also be specified in request options; if defined both here
 * and in request options, the serializer from the request options wins.
 *
 * @param customItemSerializer the custom serializer to be used for item payload transformations
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
    this.defaultCustomSerializer = customItemSerializer;
    return this;
}

// Package-private read of the default custom item serializer.
CosmosItemSerializer getCustomItemSerializer() {
    return defaultCustomSerializer;
}
/**
 * Builds a cosmos async client with the provided properties.
 *
 * @return CosmosAsyncClient
 */
public CosmosAsyncClient buildAsyncClient() {
    // The public entry point always emits the startup diagnostics log line.
    return buildAsyncClient(true);
}
/**
 * Builds a cosmos async client with the provided properties
 *
 * @param logStartupInfo whether to emit the startup diagnostics log line
 * @return CosmosAsyncClient
 */
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    // Stopwatch measures total client construction (incl. warm-up) for the startup log.
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        // Proactive warm-up: open connections / init caches for the configured
        // containers, bounded by the aggressive warm-up duration when one is set.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // Completion is still recorded (with an empty container list) when no
        // proactive init was configured.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
/**
 * Builds a cosmos sync client with the provided properties
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    // Stopwatch measures total client construction (incl. warm-up) for the startup log.
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        // Proactive warm-up mirrors buildAsyncClient(boolean).
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    }
    // NOTE(review): unlike buildAsyncClient(boolean), no completion record is emitted
    // here when proactiveContainerInitConfig is null - confirm whether that asymmetry
    // is intentional.
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
// Materializes the ConnectionPolicy from the configured direct/gateway configs and
// copies the remaining builder-level settings onto it. Called during build.
ConnectionPolicy buildConnectionPolicy() {
    // Direct mode wins when both configs are present; a default gateway config is
    // still created because some metadata operations go through the gateway client.
    if (this.directConnectionConfig != null) {
        if (this.gatewayConnectionConfig == null) {
            this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        }
        this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
    } else if (gatewayConnectionConfig != null) {
        this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
    }
    // NOTE(review): if both configs were null, connectionPolicy keeps whatever value
    // it already had - presumably the builder initializes directConnectionConfig by
    // default; confirm, otherwise the calls below could NPE.
    this.connectionPolicy.setPreferredRegions(this.preferredRegions);
    this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
    this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
    this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
    this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
    this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
    this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    return this.connectionPolicy;
}
/**
 * Validates the builder configuration before a client is constructed.
 * Checks endpoint syntax, preferred-region names, proactive-init constraints
 * and that exactly one credential mechanism has been supplied.
 *
 * @throws IllegalArgumentException when any required setting is missing or invalid.
 */
private void validateConfig() {
    // Check for a null endpoint BEFORE parsing it: new URI(null) throws
    // NullPointerException, which would mask the intended IllegalArgumentException.
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Normalize each preferred region (lowercase, no spaces) and verify a regional
        // endpoint can actually be derived from it.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // At least one authentication mechanism must be present.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
Configs configs() {
    // Package-private accessor for the internal Configs instance.
    return this.configs;
}
/**
* Configs
*
* @return current cosmosClientBuilder
*/
CosmosClientBuilder configs(Configs configs) {
    // Package-private override of the internal Configs; fluent.
    this.configs = configs;
    return this;
}
// Guard helper: raises IllegalArgumentException with the given message when the condition holds.
private void ifThrowIllegalArgException(boolean value, String error) {
    if (!value) {
        return;
    }
    throw new IllegalArgumentException(error);
}
// Emits a single WARN-level summary of the effective client configuration and startup time.
private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
    stopwatch.stop();
    if (!logger.isWarnEnabled()) {
        return;
    }
    long elapsedMs = stopwatch.getTime();
    String diagnosticsCfg = client.getClientTelemetryConfig() == null
        ? ""
        : client.getClientTelemetryConfig().toString();
    DiagnosticsProvider provider = client.getDiagnosticsProvider();
    String tracingCfg = provider == null ? "" : provider.getTraceConfigLog();
    logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
        "configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
        "consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
        "connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
        "diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
        client.getContextClient().getClientCorrelationId(), elapsedMs, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
        getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
        isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
        isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
        tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
        io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
}
// Registers the package-private accessor bridge so that internal implementation
// packages can read/write builder state without widening the public API surface.
// Each override below is a pure delegation to a package-private builder member.
static void initialize() {
CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
@Override
public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
CosmosClientMetadataCachesSnapshot metadataCache) {
builder.metadataCaches(metadataCache);
}
@Override
public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
return builder.metadataCaches();
}
@Override
public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
builder.setApiType(apiType);
}
@Override
public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
return builder.apiType();
}
@Override
public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
return builder.getConnectionPolicy();
}
@Override
public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
return builder.buildConnectionPolicy();
}
@Override
public Configs getConfigs(CosmosClientBuilder builder) {
return builder.configs();
}
@Override
public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
return builder.getConsistencyLevel();
}
@Override
public String getEndpoint(CosmosClientBuilder builder) {
return builder.getEndpoint();
}
@Override
public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
return builder.getCustomItemSerializer();
}
@Override
public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
}
@Override
public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
return builder.isRegionScopedSessionCapturingEnabled();
}
});
}
// Ensure the accessor is registered as soon as the class is loaded.
static { initialize(); }
} |
Changed to return an immutable copy | List<CosmosOperationPolicy> getOperationPolicies() {
return requestPolicies;
} | return requestPolicies; | List<CosmosOperationPolicy> getOperationPolicies() {
return UnmodifiableList.unmodifiableList(this.requestPolicies);
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
private Configs configs = new Configs();
private String serviceEndpoint;
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
private List<String> preferredRegions;
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
private Boolean clientTelemetryEnabledOverride = null;
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
* Instantiates a new Cosmos client builder.
*/
public CosmosClientBuilder() {
// Default connection mode is DIRECT with default config; callers can switch via
// gatewayMode()/directMode().
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.userAgentSuffix = "";
this.throttlingRetryOptions = new ThrottlingRetryOptions();
this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
// Seed the write-retry policy from system configuration (falls back to DISABLED).
this.resetNonIdempotentWriteRetryPolicy();
this.requestPolicies = new LinkedList<>();
}
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
    // Installs a previously captured metadata-cache snapshot; fluent.
    this.state = metadataCachesSnapshot;
    return this;
}
CosmosClientMetadataCachesSnapshot metadataCaches() {
    // Accessor for the installed metadata-cache snapshot (may be null).
    return state;
}
/**
* Sets a {@code boolean} flag to reduce the frequency of retries when the client
* strives to meet Session Consistency guarantees for operations
* that can be scoped to a single logical partition. Read your writes for a given logical partition
* should see higher stickiness to regions where the logical partition was written to prior or saw requests in
* thus reducing unnecessary cross-region retries. Reduction of retries would reduce CPU utilization spikes on VMs
* where the client is deployed along with latency savings through reduction of cross-region calls.
*
* <p>
* DISCLAIMER: Setting the {@link CosmosClientBuilder
* will impact all operations executed through this instance of the client provided that
* both the operation and the account support multi-region writes.
* </p>
* <p>
* Setting {@link CosmosClientBuilder
* ensure to maintain a singleton instance of {@link CosmosClient} or {@link CosmosAsyncClient}.
* </p>
*
* Operations supported:
* <ul>
* <li>Read</li>
* <li>Create</li>
* <li>Upsert</li>
* <li>Delete</li>
* <li>Replace</li>
* <li>Batch</li>
* <li>Patch</li>
* <li>Query when scoped to a single logical partition by specifying {@code PartitionKey} with {@link com.azure.cosmos.models.CosmosQueryRequestOptions}</li>
* <li>Change feed when scoped to a single logical partition by using {@code FeedRange.forLogicalPartition()} with {@link com.azure.cosmos.models.CosmosChangeFeedRequestOptions}</li>
* </ul>
*
* <p>
* NOTE: Bulk operations are not supported.
* </p>
*
* @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
* @return current {@link CosmosClientBuilder}
* */
CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
    // Package-private toggle; may still be overridden by resetSessionCapturingType().
    this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
    return this;
}
/**
* Gets the {@code boolean} flag {@link CosmosClientBuilder
*
* @return isRegionScopedSessionCapturingEnabled A {@code boolean} flag
* */
boolean isRegionScopedSessionCapturingEnabled() {
    return isRegionScopedSessionCapturingEnabled;
}
/**
* Sets an apiType for the builder.
* @param apiType
* @return current cosmosClientBuilder
*/
CosmosClientBuilder setApiType(ApiType apiType) {
    // Package-private; set through the accessor bridge by internal packages.
    this.apiType = apiType;
    return this;
}
/**
* Adds a policy for modifying request options dynamically.
*
* @param policy the policy to add
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
    // Reject null eagerly, consistent with the other mutators on this builder
    // (key, credential, permissions, ...) instead of failing later at request time.
    this.requestPolicies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
    return this;
}
/**
* Returns apiType for the Builder.
* @return
*/
ApiType apiType() {
    // Accessor for the api type configured via setApiType (may be null).
    return apiType;
}
/**
* Session capturing is enabled by default for {@link ConsistencyLevel
* For other consistency levels, it is not needed, unless if you need occasionally send requests with Session
* Consistency while the client is not configured in session.
* <p>
* enabling Session capturing for Session mode has no effect.
* @param sessionCapturingOverrideEnabled session capturing override
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
    // Forces session capturing even for non-Session consistency levels; fluent.
    this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
    return this;
}
/**
* Indicates if Session capturing is enabled for non Session modes.
* The default is false.
*
* @return the session capturing override
*/
boolean isSessionCapturingOverrideEnabled() {
    return sessionCapturingOverrideEnabled;
}
/**
* Enables connections sharing across multiple Cosmos Clients. The default is false.
* <br/>
* <br/>
* <pre>
* {@code
* CosmosAsyncClient client1 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint1)
* .key(key1)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
* CosmosAsyncClient client2 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint2)
* .key(key2)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
*
* }
* </pre>
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout
* config) of the first instantiated client will be used for all other client instances.
* <br/>
* @param connectionSharingAcrossClientsEnabled connection sharing
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
    // Enables direct-mode connection sharing between client instances in the same JVM; fluent.
    this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
    return this;
}
/**
* Indicates whether connection sharing is enabled. The default is false.
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* @return the connection sharing across multiple clients
*/
boolean isConnectionSharingAcrossClientsEnabled() {
    return connectionSharingAcrossClientsEnabled;
}
/**
* Gets the token resolver
* <br/>
* @return the token resolver
*/
CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
    return this.cosmosAuthorizationTokenResolver;
}
/**
* Sets the token resolver
*
* @param cosmosAuthorizationTokenResolver the token resolver
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder authorizationTokenResolver(
    CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
    this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
        "'cosmosAuthorizationTokenResolver' cannot be null.");
    // Only one authentication mechanism may be active at a time; clear the others.
    this.keyOrResourceToken = null;
    this.tokenCredential = null;
    this.credential = null;
    this.permissions = null;
    return this;
}
/**
* Gets the Azure Cosmos DB endpoint the SDK will connect to
*
* @return the endpoint
*/
String getEndpoint() {
    return this.serviceEndpoint;
}
/**
* Sets the Azure Cosmos DB endpoint the SDK will connect to
*
* @param endpoint the service endpoint
* @return current Builder
*/
@Override
public CosmosClientBuilder endpoint(String endpoint) {
    // Required setting; null is rejected immediately rather than at build time.
    this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    return this;
}
/**
* Gets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @return the key
*/
String getKey() {
    return this.keyOrResourceToken;
}
/**
* Sets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @param key master or readonly key
* @return current Builder.
*/
public CosmosClientBuilder key(String key) {
    this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
    // Only one authentication mechanism may be active at a time; clear the others.
    this.cosmosAuthorizationTokenResolver = null;
    this.tokenCredential = null;
    this.credential = null;
    this.permissions = null;
    return this;
}
/**
* Gets a resource token used to perform authentication
* for accessing resource.
*
* @return the resourceToken
*/
String getResourceToken() {
    // Shares storage with the master/readonly key; the last setter invoked wins.
    return this.keyOrResourceToken;
}
/**
* Sets a resource token used to perform authentication
* for accessing resource.
*
* @param resourceToken resourceToken for authentication
* @return current Builder.
*/
public CosmosClientBuilder resourceToken(String resourceToken) {
    this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
    // Only one authentication mechanism may be active at a time; clear the others.
    this.cosmosAuthorizationTokenResolver = null;
    this.tokenCredential = null;
    this.credential = null;
    this.permissions = null;
    return this;
}
/**
* Gets a token credential instance used to perform authentication
* for accessing resource.
*
* @return the token credential.
*/
TokenCredential getTokenCredential() {
    return this.tokenCredential;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
* <a href="https:
* documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential {@link TokenCredential} used to authorize requests sent to the service.
* @return the updated CosmosClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Only one authentication mechanism may be active at a time; clear the others.
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    return this;
}
/**
* Gets the permission list, which contains the
* resource tokens needed to access resources.
*
* @return the permission list
*/
List<CosmosPermissionProperties> getPermissions() {
    return this.permissions;
}
/**
* Sets the permission list, which contains the
* resource tokens needed to access resources.
*
* @param permissions Permission list for authentication.
* @return current Builder.
*/
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
    this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
    // Only one authentication mechanism may be active at a time; clear the others.
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.tokenCredential = null;
    this.credential = null;
    return this;
}
/**
* Gets the {@link ConsistencyLevel} to be used
* <br/>
* By default, {@link ConsistencyLevel
* <br/>
* @return the consistency level
*/
ConsistencyLevel getConsistencyLevel() {
    return desiredConsistencyLevel;
}
/**
* Sets the {@link ConsistencyLevel} to be used
* <br/>
* By default, {@link ConsistencyLevel
*
* @param desiredConsistencyLevel {@link ConsistencyLevel}
* @return current Builder
*/
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
    // Null is accepted here and means "use the account default".
    this.desiredConsistencyLevel = desiredConsistencyLevel;
    return this;
}
/**
* Gets the (@link ConnectionPolicy) to be used
*
* @return the connection policy
*/
ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/**
* Gets the {@link AzureKeyCredential} to be used
*
* @return {@link AzureKeyCredential}
*/
AzureKeyCredential getCredential() {
    return this.credential;
}
/**
* Gets the {@link CosmosContainerProactiveInitConfig} to be used
*
* @return {@link CosmosContainerProactiveInitConfig}
* */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
    return this.proactiveContainerInitConfig;
}
/**
* Sets the {@link AzureKeyCredential} to be used
*
* @param credential {@link AzureKeyCredential}
* @return current cosmosClientBuilder
*/
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
    this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
    // Only one authentication mechanism may be active at a time; clear the others.
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.tokenCredential = null;
    this.permissions = null;
    return this;
}
/**
* Gets the boolean which indicates whether to only return the headers and status code in Cosmos DB response
* in case of Create, Update and Delete operations on CosmosItem.
* <br/>
* If set to false (which is by default), service doesn't return payload in the response. It reduces networking
* and CPU load by not sending the payload back over the network and serializing it
* on the client.
* <br/>
* By-default, this is false.
*
* @return a boolean indicating whether payload will be included in the response or not
*/
boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/**
* Sets the boolean to only return the headers and status code in Cosmos DB response
* in case of Create, Update and Delete operations on CosmosItem.
* <br/>
* If set to false (which is by default), service doesn't return payload in the response. It reduces networking
* and CPU load by not sending the payload back over the network and serializing it on the client.
* <br/>
* This feature does not impact RU usage for read or write operations.
* <br/>
* By-default, this is false.
*
* @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
    // When false (default) write responses omit the payload to save network/CPU; fluent.
    this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
    return this;
}
/**
* Sets the default GATEWAY connection configuration to be used.
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder gatewayMode() {
    // Switch to gateway connectivity with default settings; fluent.
    this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
    return this;
}
/**
* Sets the GATEWAY connection configuration to be used.
*
* @param gatewayConnectionConfig gateway connection configuration
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
    // Switch to gateway connectivity with the supplied settings; fluent.
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
* Sets the default DIRECT connection configuration to be used.
* <br/>
* By default, the builder is initialized with directMode()
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode() {
    // Switch to direct connectivity with default settings; fluent.
    this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
    return this;
}
/**
* Sets the DIRECT connection configuration to be used.
* <br/>
* By default, the builder is initialized with directMode()
*
* @param directConnectionConfig direct connection configuration
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
    // Switch to direct connectivity with the supplied settings; fluent.
    this.directConnectionConfig = directConnectionConfig;
    return this;
}
/**
* Sets the DIRECT connection configuration to be used.
* gatewayConnectionConfig - represents basic configuration to be used for gateway client.
* <br/>
* Even in direct connection mode, some of the meta data operations go through gateway client,
* <br/>
* Setting gateway connection config in this API doesn't affect the connection mode,
* which will be Direct in this case.
*
* @param directConnectionConfig direct connection configuration to be used
* @param gatewayConnectionConfig gateway connection configuration to be used
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
    // Direct connectivity; the gateway config is used only for metadata operations.
    this.directConnectionConfig = directConnectionConfig;
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
* sets the value of the user-agent suffix.
*
* @param userAgentSuffix The value to be appended to the user-agent header, this is
* used for monitoring purposes.
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
    // Appended to the user-agent header for monitoring purposes; fluent.
    this.userAgentSuffix = userAgentSuffix;
    return this;
}
/**
* Sets the retry policy options associated with the DocumentClient instance.
* <p>
* Properties in the RetryOptions class allow application to customize the built-in
* retry policies. This property is optional. When it's not set, the SDK uses the
* default values for configuring the retry policies. See RetryOptions class for
* more details.
*
* @param throttlingRetryOptions the RetryOptions instance.
* @return current CosmosClientBuilder
* @throws IllegalArgumentException thrown if an error occurs
*/
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
    // Overrides the default retry behavior for throttled (429) requests; fluent.
    this.throttlingRetryOptions = throttlingRetryOptions;
    return this;
}
/**
* Sets the preferred regions for geo-replicated database accounts. For example,
* "East US" as the preferred region.
* <p>
* When EnableEndpointDiscovery is true and PreferredRegions is non-empty,
* the SDK will prefer to use the regions in the container in the order
* they are specified to perform operations.
* <p>
* If EnableEndpointDiscovery is set to false, this property is ignored.
*
* @param preferredRegions the list of preferred regions.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
    // Region names are validated and normalized later in validateConfig(); fluent.
    this.preferredRegions = preferredRegions;
    return this;
}
/**
* Sets the flag to enable endpoint discovery for geo-replicated database accounts.
* <p>
* When EnableEndpointDiscovery is true, the SDK will automatically discover the
* current write and read regions to ensure requests are sent to the correct region
* based on the capability of the region and the user's preference.
* <p>
* The default value for this property is true indicating endpoint discovery is enabled.
*
* @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
    // Defaults to true; fluent.
    this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
    return this;
}
/**
* Sets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
* Cosmos DB service.
* <p>
* When the value of this property is true, the SDK will direct write operations to
* available writable regions of geo-replicated database account. Writable regions
* are ordered by PreferredRegions property. Setting the property value
* to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
* is also set to true.
* <p>
* DEFAULT value is true indicating that writes are directed to
* available writable regions of geo-replicated database account.
*
* @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
* database accounts.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
    // Defaults to true; only effective when the account enables multi-region writes.
    this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
    return this;
}
/**
* Sets the flag to enable client telemetry which will periodically collect
* database operations aggregation statistics, system information like cpu/memory
* and send it to cosmos monitoring service, which will be helpful during debugging.
*<p>
* DEFAULT value is false indicating this is opt in feature, by default no telemetry collection.
*
* @param clientTelemetryEnabled flag to enable client telemetry.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
    // Stored as an override (Boolean) so "never set" remains distinguishable; fluent.
    this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
    return this;
}
/**
* Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
* <p>
* DEFAULT value is true.
* <p>
* If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
* The default is false for Bounded Staleness.
* 1. {@link
* 2. the Azure Cosmos DB account has more than one region
*
* @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
* Azure Cosmos DB service.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
    // Allows reads to fall back to other configured regions; fluent.
    this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
    return this;
}
/**
* Enables automatic retries for write operations even when the SDK can't
* guarantee that they are idempotent. This is the default behavior for the entire Cosmos client - the policy can be
* overridden for individual operations in the request options.
* <br/>>
* NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
* Upsert and Delete operations. It can be overridden on per-request base in the request options. For patch
* operations by default (unless overridden in the request options) retries are always disabled by default.
* <br/>
* - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
* to create a document that the initial attempt successfully created. When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
* <br/>
* - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when a
* replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
* initial attempt successfully updated (causing the etag to change). When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
* <br/>
* - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
* initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
* handle 404 - Not Found.
* <br/>
* - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
* document has been created by the initial attempt - so logically within the same operation. This will only
* impact applications who have special casing for 201 vs. 200 for upsert operations.
* <br/>
* Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
* being executed and the precondition filters being used. Before enabling write retries for patch this needs
* to be carefully reviewed and tests - which is wht retries for patch can only be enabled on request options
* - any CosmosClient wide configuration will be ignored.
* <br/>
* Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
* @param options the options controlling whether non-idempotent write operations should be retried and whether
* trackingIds can be used.
* @return the CosmosItemRequestOptions
*/
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
    checkNotNull(options, "Argument 'options' must not be null.");
    // Map the options pair (enabled, trackingIdUsed) onto the internal tri-state policy.
    if (!options.isEnabled()) {
        this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
    } else if (options.isTrackingIdUsed()) {
        this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
    } else {
        this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
    }
    return this;
}
WriteRetryPolicy getNonIdempotentWriteRetryPolicy() {
    return this.writeRetryPolicy;
}
void resetNonIdempotentWriteRetryPolicy() {
    // Seed the policy from system configuration; any unknown or missing value
    // (including an explicit "NO_RETRIES") resolves to DISABLED.
    String writePolicyName = Configs.getNonIdempotentWriteRetryPolicy();
    WriteRetryPolicy resolved = WriteRetryPolicy.DISABLED;
    if (writePolicyName != null) {
        if (writePolicyName.equalsIgnoreCase("WITH_TRACKING_ID")) {
            resolved = WriteRetryPolicy.WITH_TRACKING_ID;
        } else if (writePolicyName.equalsIgnoreCase("WITH_RETRIES")) {
            resolved = WriteRetryPolicy.WITH_RETRIES;
        }
    }
    this.writeRetryPolicy = resolved;
}
void resetSessionCapturingType() {
    String sessionCapturingType = Configs.getSessionCapturingType();
    if (StringUtils.isEmpty(sessionCapturingType)) {
        // No override configured; keep whatever was set explicitly on the builder.
        return;
    }
    boolean regionScoped = sessionCapturingType.equalsIgnoreCase("REGION_SCOPED");
    if (regionScoped) {
        logger.info("Session capturing type is set to REGION_SCOPED");
    } else {
        logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
    }
    this.isRegionScopedSessionCapturingEnabled = regionScoped;
}
/**
 * Sets the {@link CosmosContainerProactiveInitConfig} which enables warming up caches and
 * connections for the configured containers across the first k preferred regions.
 * <p>
 * Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate the
 * {@link CosmosContainerProactiveInitConfig} class.
 *
 * @param proactiveContainerInitConfig encapsulates a list of container identities and the
 * number of proactive connection regions
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    this.proactiveContainerInitConfig = proactiveContainerInitConfig;
    return this;
}

/**
 * Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client.
 *
 * @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig) {
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    return this;
}

/**
 * Sets the {@link SessionRetryOptions} instance on the client.
 * <p>
 * Tunes retry behavior for {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} ({@code 404 / 1002})
 * scenarios, which occur when a request using <i>Session Consistency</i> is routed to a region
 * that does not yet have recent enough data for the request.
 * <p>
 * DISCLAIMER: this setting modifies retry behavior for all operations and workloads executed
 * through this client instance. For multi-write accounts it governs how quickly reads and
 * writes are retried against a different write region (which may hold more up-to-date data);
 * for single-write accounts it mainly helps read requests switch to the write region quicker.
 * Use {@link CosmosRegionSwitchHint} to either prioritize the local region for retries or to
 * move retries to a different / remote region quicker. Supported operations: read, query,
 * create, replace, upsert, delete, patch, batch and bulk.
 *
 * @param sessionRetryOptions The {@link SessionRetryOptions} instance.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
    this.sessionRetryOptions = sessionRetryOptions;
    return this;
}

/**
 * Sets a {@link Supplier} returning the {@link CosmosExcludedRegions} instance to consult per
 * request. Requests will not be routed to the supplied excluded regions, for hedging as well
 * as retry scenarios, for the workload executed through this instance of
 * {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
 * @return current CosmosClientBuilder.
 */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
    this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
    return this;
}
/**
 * Gets the regions to exclude from the list of preferred regions. A request will not be
 * routed to these excluded regions for non-retry and retry scenarios
 * for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @return the set of regions to exclude; never {@code null}.
 * */
Set<String> getExcludedRegions() {
    // Snapshot the supplier result ONCE. The previous implementation called get() twice
    // (once as a guard, once for the value); a non-deterministic or expensive supplier could
    // return different results between the two calls - including null on the second call,
    // which would have thrown a NullPointerException.
    if (this.cosmosExcludedRegionsSupplier != null) {
        CosmosExcludedRegions excludedRegions = this.cosmosExcludedRegionsSupplier.get();
        if (excludedRegions != null) {
            return excludedRegions.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
/** @return the configured {@link SessionRetryOptions}; may be {@code null}. */
SessionRetryOptions getSessionRetryOptions() {
    return this.sessionRetryOptions;
}

/** @return the configured {@link CosmosEndToEndOperationLatencyPolicyConfig}; may be {@code null}. */
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}

/** @return the GATEWAY connection configuration to be used; may be {@code null}. */
GatewayConnectionConfig getGatewayConnectionConfig() {
    return this.gatewayConnectionConfig;
}

/** @return the DIRECT connection configuration to be used; may be {@code null}. */
DirectConnectionConfig getDirectConnectionConfig() {
    return this.directConnectionConfig;
}

/** @return the user-agent suffix appended to requests issued by this client. */
String getUserAgentSuffix() {
    return this.userAgentSuffix;
}

/** @return the throttling retry options associated with the DocumentClient instance. */
ThrottlingRetryOptions getThrottlingRetryOptions() {
    return this.throttlingRetryOptions;
}

/** @return the preferred regions for geo-replicated accounts, or an empty list when unset. */
List<String> getPreferredRegions() {
    if (this.preferredRegions == null) {
        return Collections.emptyList();
    }
    return this.preferredRegions;
}

/** @return whether endpoint discovery is enabled for geo-replicated accounts. */
boolean isEndpointDiscoveryEnabled() {
    return this.endpointDiscoveryEnabled;
}

/**
 * Gets the flag to enable writes on any region for geo-replicated database accounts.
 * When true, the SDK directs writes to the available writable regions (ordered by the
 * preferred-regions property); it only takes effect if the database account also has
 * EnableMultipleWriteRegions set to true. DEFAULT is true.
 *
 * @return flag to enable writes on any regions for geo-replicated database accounts.
 */
boolean isMultipleWriteRegionsEnabled() {
    return this.multipleWriteRegionsEnabled;
}
/**
 * Resolves whether client telemetry is enabled. Precedence: the builder-level override (when
 * set), then an explicit choice on the telemetry config, and finally the SDK default.
 *
 * @return flag to enable client telemetry.
 */
boolean isClientTelemetryEnabled() {
    Boolean explicitChoiceInConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
    Boolean override = this.clientTelemetryEnabledOverride;
    if (override != null) {
        return override;
    }
    if (explicitChoiceInConfig != null) {
        return explicitChoiceInConfig;
    }
    return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}

/**
 * Gets whether reads may fall back to other regions configured on the account.
 * DEFAULT is true (false only for Bounded Staleness unless set explicitly).
 *
 * @return flag to allow reads to go to multiple regions configured on the account.
 */
boolean isReadRequestsFallbackEnabled() {
    return this.readRequestsFallbackEnabled;
}

/** @return the client telemetry config instance for this builder. */
CosmosClientTelemetryConfig getClientTelemetryConfig() {
    return this.clientTelemetryConfig;
}

/**
 * Sets the client telemetry configuration to be used by this builder.
 *
 * @param telemetryConfig the client telemetry configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
    ifThrowIllegalArgException(telemetryConfig == null,
        "Parameter 'telemetryConfig' must not be null.");
    Boolean explicitChoiceInConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(telemetryConfig);
    // An explicit choice in the new config supersedes any previously applied override.
    if (explicitChoiceInConfig != null) {
        this.clientTelemetryEnabledOverride = null;
    }
    this.clientTelemetryConfig = telemetryConfig;
    return this;
}

/**
 * Sets a custom serializer used for conversion between POJOs and the JSON payload stored in
 * the service. A serializer specified in request options takes precedence over this one.
 *
 * @param customItemSerializer the custom serializer to be used for item payload transformations
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
    this.defaultCustomSerializer = customItemSerializer;
    return this;
}

/** @return the default custom item serializer; may be {@code null}. */
CosmosItemSerializer getCustomItemSerializer() {
    return this.defaultCustomSerializer;
}
/**
 * Builds a cosmos async client with the provided properties
 *
 * @return CosmosAsyncClient
 */
public CosmosAsyncClient buildAsyncClient() {
return buildAsyncClient(true);
}
/**
 * Builds a cosmos async client with the provided properties
 *
 * @param logStartupInfo whether to emit the one-time startup configuration log entry
 * @return CosmosAsyncClient
 */
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
// Stopwatch measures construction plus optional warm-up, for the startup log below.
StopWatch stopwatch = new StopWatch();
stopwatch.start();
this.resetSessionCapturingType();
validateConfig();
buildConnectionPolicy();
CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
if (proactiveContainerInitConfig != null) {
cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
// Use the aggressive warm-up window when one was configured; otherwise warm up with defaults.
Duration aggressiveWarmupDuration = proactiveContainerInitConfig
.getAggressiveWarmupDuration();
if (aggressiveWarmupDuration != null) {
cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
} else {
cosmosAsyncClient.openConnectionsAndInitCaches();
}
cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
} else {
// No proactive init configured - completion is still recorded, with an empty identity list.
cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
}
if (logStartupInfo) {
logStartupInfo(stopwatch, cosmosAsyncClient);
}
return cosmosAsyncClient;
}
/**
 * Builds a cosmos sync client with the provided properties
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    // Stopwatch measures construction plus optional warm-up, for the startup log below.
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        // Use the aggressive warm-up window when one was configured; otherwise warm up with defaults.
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // Consistency fix: mirror buildAsyncClient(boolean), which records the "completed"
        // marker with an empty identity list even when no proactive init was configured.
        // This branch was previously missing here.
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
// Materializes the effective ConnectionPolicy from the configured connection configs.
// NOTE(review): mutates this.connectionPolicy in place; when neither a direct nor a gateway
// config was supplied, the policy created in the constructor is reused as the base.
ConnectionPolicy buildConnectionPolicy() {
if (this.directConnectionConfig != null) {
// Direct mode: a gateway config is still used alongside it; default it when absent.
if (this.gatewayConnectionConfig == null) {
this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
}
this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
} else if (gatewayConnectionConfig != null) {
this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
}
// Copy the builder-level settings onto whichever policy instance is now active.
this.connectionPolicy.setPreferredRegions(this.preferredRegions);
this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
return this.connectionPolicy;
}
/**
 * Validates the builder configuration before a client is constructed: the endpoint must be
 * present and a parseable URI; preferred regions (when set) must be non-blank and resolvable
 * against the endpoint; proactive-init settings must be consistent with the preferred regions;
 * and at least one authentication mechanism must be configured.
 *
 * @throws IllegalArgumentException when any of the above conditions is violated.
 */
private void validateConfig() {
    // Check for a missing endpoint BEFORE parsing it: previously a null serviceEndpoint
    // reached `new URI(null)` and surfaced as a NullPointerException, making the dedicated
    // null check further down unreachable for that case - callers saw an NPE instead of the
    // intended IllegalArgumentException.
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Normalize each preferred region (lower-case, strip spaces) and verify a regional
        // endpoint can actually be derived from it.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // At least one authentication mechanism must be present.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
/** @return the {@link Configs} instance used by this builder. */
Configs configs() {
    return this.configs;
}

/**
 * Overrides the {@link Configs} instance used by this builder.
 *
 * @param configs the configs
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder configs(Configs configs) {
    this.configs = configs;
    return this;
}

/**
 * Throws an {@link IllegalArgumentException} carrying {@code error} when {@code value} is true.
 *
 * @param value the condition that, when true, triggers the exception
 * @param error the exception message
 */
private void ifThrowIllegalArgException(boolean value, String error) {
    if (!value) {
        return;
    }
    throw new IllegalArgumentException(error);
}
// Emits a one-time summary of the effective client configuration once startup has completed.
// NOTE(review): logged at WARN level - presumably so the entry is visible under default
// logging configurations; confirm before changing the level.
private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
stopwatch.stop();
if (logger.isWarnEnabled()) {
long time = stopwatch.getTime();
// Both configs are optional; fall back to empty strings so the log line stays well-formed.
String diagnosticsCfg = "";
String tracingCfg = "";
if (client.getClientTelemetryConfig() != null) {
diagnosticsCfg = client.getClientTelemetryConfig().toString();
}
DiagnosticsProvider provider = client.getDiagnosticsProvider();
if (provider != null) {
tracingCfg = provider.getTraceConfigLog();
}
logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
"configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
"consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
"connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
"diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
}
}
// Registers the accessor that lets internal SDK components (via CosmosClientBuilderHelper)
// reach package-private state on CosmosClientBuilder without widening its public API surface.
static void initialize() {
CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
@Override
public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
CosmosClientMetadataCachesSnapshot metadataCache) {
builder.metadataCaches(metadataCache);
}
@Override
public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
return builder.metadataCaches();
}
@Override
public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
builder.setApiType(apiType);
}
@Override
public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
return builder.apiType();
}
@Override
public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
return builder.getConnectionPolicy();
}
@Override
public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
return builder.buildConnectionPolicy();
}
@Override
public Configs getConfigs(CosmosClientBuilder builder) {
return builder.configs();
}
@Override
public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
return builder.getConsistencyLevel();
}
@Override
public String getEndpoint(CosmosClientBuilder builder) {
return builder.getEndpoint();
}
@Override
public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
return builder.getCustomItemSerializer();
}
@Override
public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
}
@Override
public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
return builder.isRegionScopedSessionCapturingEnabled();
}
});
}
// Ensures the accessor is registered as soon as this class is loaded.
static { initialize(); }
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
private Configs configs = new Configs();
// Endpoint and authentication state - the credential setters keep these mutually exclusive.
private String serviceEndpoint;
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
// Connectivity configuration; connectionPolicy is (re)built in buildConnectionPolicy().
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
private List<String> preferredRegions;
// Regional routing flags - all default to enabled.
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
// Seeded from system configuration in the constructor via resetNonIdempotentWriteRetryPolicy().
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
// When non-null, takes precedence over the telemetry config in isClientTelemetryEnabled().
private Boolean clientTelemetryEnabledOverride = null;
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
 * Instantiates a new Cosmos client builder.
 */
public CosmosClientBuilder() {
// Defaults: direct connectivity, empty user-agent suffix, default throttling-retry and
// telemetry configs; the non-idempotent write retry policy is seeded from system config.
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.userAgentSuffix = "";
this.throttlingRetryOptions = new ThrottlingRetryOptions();
this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
this.resetNonIdempotentWriteRetryPolicy();
this.requestPolicies = new LinkedList<>();
}
/**
 * Stores a metadata-caches snapshot used to warm the new client's caches.
 *
 * @param metadataCachesSnapshot the snapshot to seed caches from
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
    this.state = metadataCachesSnapshot;
    return this;
}

/** @return the metadata-caches snapshot previously set on this builder; may be {@code null}. */
CosmosClientMetadataCachesSnapshot metadataCaches() {
    return this.state;
}

/**
 * Sets a {@code boolean} flag enabling region-scoped session capturing, which reduces the
 * frequency of cross-region retries when striving to meet Session Consistency guarantees for
 * operations scoped to a single logical partition - lowering latency and CPU-utilization
 * spikes caused by unnecessary cross-region calls.
 * <p>
 * DISCLAIMER: setting this flag impacts all operations executed through this client instance,
 * provided both the operation and the account support multi-region writes; maintain a
 * singleton {@link CosmosClient} or {@link CosmosAsyncClient} instance when enabling it.
 * Supported operations: read, create, upsert, delete, replace, batch, patch, and query /
 * change feed scoped to a single logical partition. NOTE: bulk operations are not supported.
 *
 * @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
 * @return current {@link CosmosClientBuilder}
 * */
CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
    this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
    return this;
}

/** @return whether region-scoped session capturing is enabled. */
boolean isRegionScopedSessionCapturingEnabled() {
    return this.isRegionScopedSessionCapturingEnabled;
}

/**
 * Sets the api type for the builder.
 *
 * @param apiType the api type to associate with this builder
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder setApiType(ApiType apiType) {
    this.apiType = apiType;
    return this;
}

/**
 * Adds a policy for modifying request options dynamically. The last policy defined aimed
 * towards the same operation type will be the one ultimately applied.
 *
 * @param policy the policy to add
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
    checkNotNull(policy, "Argument 'policy' must not be null.");
    this.requestPolicies.add(policy);
    return this;
}

/** @return the api type associated with this builder; may be {@code null}. */
ApiType apiType() {
    return this.apiType;
}
/**
 * Session capturing is enabled by default for {@link ConsistencyLevel} Session. For other
 * consistency levels it is not needed, unless requests with Session consistency are
 * occasionally sent while the client is not configured for Session mode. Enabling this
 * override while already in Session mode has no effect.
 *
 * @param sessionCapturingOverrideEnabled session capturing override
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
    this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
    return this;
}

/** @return whether session capturing is enabled for non-Session modes (default false). */
boolean isSessionCapturingOverrideEnabled() {
    return this.sessionCapturingOverrideEnabled;
}

/**
 * Enables connection sharing across multiple Cosmos clients in the same JVM (default false).
 * When several client instances interact with multiple Cosmos accounts, enabling this allows
 * Direct-mode connections to be shared between the instances where possible. NOTE: the
 * connection configuration (e.g. socket timeout, idle timeout) of the FIRST instantiated
 * client is used for all other client instances.
 *
 * @param connectionSharingAcrossClientsEnabled connection sharing
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
    this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
    return this;
}

/** @return whether Direct-mode connection sharing across clients is enabled (default false). */
boolean isConnectionSharingAcrossClientsEnabled() {
    return this.connectionSharingAcrossClientsEnabled;
}
/** @return the configured authorization token resolver; may be {@code null}. */
CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
    return this.cosmosAuthorizationTokenResolver;
}

/**
 * Sets the token resolver. Clears every other previously configured credential so only one
 * authentication mechanism is active at a time.
 *
 * @param cosmosAuthorizationTokenResolver the token resolver
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder authorizationTokenResolver(
    CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
    this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
        "'cosmosAuthorizationTokenResolver' cannot be null.");
    // Mutually exclusive with all other auth mechanisms.
    this.keyOrResourceToken = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}

/** @return the Azure Cosmos DB endpoint the SDK will connect to. */
String getEndpoint() {
    return this.serviceEndpoint;
}

/**
 * Sets the Azure Cosmos DB endpoint the SDK will connect to.
 *
 * @param endpoint the service endpoint
 * @return current Builder
 */
@Override
public CosmosClientBuilder endpoint(String endpoint) {
    this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    return this;
}

/** @return the master or readonly key used to perform authentication. */
String getKey() {
    return this.keyOrResourceToken;
}

/**
 * Sets either a master or readonly key used to perform authentication for accessing
 * resources. Clears every other previously configured credential.
 *
 * @param key master or readonly key
 * @return current Builder.
 */
public CosmosClientBuilder key(String key) {
    this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
    // Mutually exclusive with all other auth mechanisms.
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}

/** @return the resource token used to perform authentication. */
String getResourceToken() {
    return this.keyOrResourceToken;
}

/**
 * Sets a resource token used to perform authentication for accessing resources.
 * Clears every other previously configured credential.
 *
 * @param resourceToken resourceToken for authentication
 * @return current Builder.
 */
public CosmosClientBuilder resourceToken(String resourceToken) {
    this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
    // Mutually exclusive with all other auth mechanisms.
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/** @return the token credential used to perform authentication; may be {@code null}. */
TokenCredential getTokenCredential() {
    return this.tokenCredential;
}

/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to
 * the Azure SDK for Java identity documentation for details on proper usage of the
 * {@link TokenCredential} type. Clears every other previously configured credential.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated CosmosClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Mutually exclusive with all other auth mechanisms.
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    return this;
}

/** @return the permission list containing the resource tokens needed to access resources. */
List<CosmosPermissionProperties> getPermissions() {
    return this.permissions;
}

/**
 * Sets the permission list, which contains the resource tokens needed to access resources.
 * Clears every other previously configured credential.
 *
 * @param permissions Permission list for authentication.
 * @return current Builder.
 */
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
    this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
    // Mutually exclusive with all other auth mechanisms.
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.tokenCredential = null;
    return this;
}

/** @return the desired {@link ConsistencyLevel}; may be {@code null}. */
ConsistencyLevel getConsistencyLevel() {
    return this.desiredConsistencyLevel;
}

/**
 * Sets the {@link ConsistencyLevel} to be used.
 *
 * @param desiredConsistencyLevel {@link ConsistencyLevel}
 * @return current Builder
 */
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
    this.desiredConsistencyLevel = desiredConsistencyLevel;
    return this;
}
/**
* Gets the (@link ConnectionPolicy) to be used
*
* @return the connection policy
*/
ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
/**
* Gets the {@link AzureKeyCredential} to be used
*
* @return {@link AzureKeyCredential}
*/
AzureKeyCredential getCredential() {
return credential;
}
/**
* Gets the {@link CosmosContainerProactiveInitConfig} to be used
*
* @return {@link CosmosContainerProactiveInitConfig}
* */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
return proactiveContainerInitConfig;
}
/**
 * Sets the {@link AzureKeyCredential} to be used for authentication. Because
 * only one authentication mechanism may be active at a time, all other
 * previously configured credential sources on this builder are cleared.
 *
 * @param credential {@link AzureKeyCredential}; must not be {@code null}
 * @return current cosmosClientBuilder
 */
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
    this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
    // Drop every competing credential type.
    this.tokenCredential = null;
    this.permissions = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.keyOrResourceToken = null;
    return this;
}
/**
 * Gets the boolean which indicates whether the full payload (rather than only
 * headers and status code) is returned in Cosmos DB responses for Create,
 * Update and Delete operations on CosmosItem.
 * <br/>
 * When false (the default), the service does not return the payload in the
 * response, reducing networking and client CPU load (no serialization of the
 * payload on the way back).
 *
 * @return a boolean indicating whether payload will be included in the response or not
 */
boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}
/**
 * Sets the boolean to only return the headers and status code in Cosmos DB response
 * in case of Create, Update and Delete operations on CosmosItem.
 * <br/>
 * If set to false (which is the default), the service doesn't return the payload in
 * the response. It reduces networking and CPU load by not sending the payload back
 * over the network and serializing it on the client.
 * <br/>
 * This feature does not impact RU usage for read or write operations.
 *
 * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
    this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
    return this;
}
/**
 * Sets the default GATEWAY connection configuration to be used.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode() {
    this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
    return this;
}
/**
 * Sets the GATEWAY connection configuration to be used.
 *
 * @param gatewayConnectionConfig gateway connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
 * Sets the default DIRECT connection configuration to be used.
 * <br/>
 * By default, the builder is initialized with directMode().
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode() {
    this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
    return this;
}
/**
 * Sets the DIRECT connection configuration to be used.
 * <br/>
 * By default, the builder is initialized with directMode().
 *
 * @param directConnectionConfig direct connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
    this.directConnectionConfig = directConnectionConfig;
    return this;
}
/**
 * Sets the DIRECT connection configuration to be used, together with a gateway
 * configuration for the metadata operations that still go through the gateway
 * client even in direct mode.
 * <br/>
 * Setting the gateway connection config in this API doesn't affect the
 * connection mode, which remains Direct in this case.
 *
 * @param directConnectionConfig direct connection configuration to be used
 * @param gatewayConnectionConfig gateway connection configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
    this.directConnectionConfig = directConnectionConfig;
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
 * Sets the value of the user-agent suffix.
 *
 * @param userAgentSuffix The value to be appended to the user-agent header; this is
 * used for monitoring purposes.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
    this.userAgentSuffix = userAgentSuffix;
    return this;
}
/**
 * Sets the retry policy options associated with the DocumentClient instance.
 * <p>
 * Properties in the RetryOptions class allow the application to customize the
 * built-in retry policies. This property is optional. When it's not set, the SDK
 * uses the default values for configuring the retry policies. See the
 * RetryOptions class for more details.
 *
 * @param throttlingRetryOptions the RetryOptions instance.
 * @return current CosmosClientBuilder
 * @throws IllegalArgumentException thrown if an error occurs
 */
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
    this.throttlingRetryOptions = throttlingRetryOptions;
    return this;
}
/**
 * Sets the preferred regions for geo-replicated database accounts. For example,
 * "East US" as the preferred region.
 * <p>
 * When EnableEndpointDiscovery is true and PreferredRegions is non-empty,
 * the SDK will prefer to use the regions in the order they are specified to
 * perform operations.
 * <p>
 * If EnableEndpointDiscovery is set to false, this property is ignored.
 *
 * @param preferredRegions the list of preferred regions.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
    this.preferredRegions = preferredRegions;
    return this;
}
/**
 * Sets the flag to enable endpoint discovery for geo-replicated database accounts.
 * <p>
 * When EnableEndpointDiscovery is true, the SDK will automatically discover the
 * current write and read regions to ensure requests are sent to the correct region
 * based on the capability of the region and the user's preference.
 * <p>
 * The default value for this property is true, indicating endpoint discovery is enabled.
 *
 * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
    this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
    return this;
}
/**
 * Sets the flag to enable writes on any regions for geo-replicated database accounts
 * in the Azure Cosmos DB service.
 * <p>
 * When the value of this property is true, the SDK will direct write operations to
 * available writable regions of a geo-replicated database account. Writable regions
 * are ordered by the PreferredRegions property. Setting the property value
 * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
 * is also set to true.
 * <p>
 * DEFAULT value is true, indicating that writes are directed to
 * available writable regions of a geo-replicated database account.
 *
 * @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
 * database accounts.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
    this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
    return this;
}
/**
 * Sets the flag to enable client telemetry, which will periodically collect
 * database operation aggregation statistics and system information like cpu/memory
 * and send it to the Cosmos monitoring service, which is helpful during debugging.
 * <p>
 * DEFAULT value is false, indicating this is an opt-in feature; by default no
 * telemetry collection.
 * <p>
 * Note: this stores an override that takes precedence over any value configured
 * via {@link CosmosClientTelemetryConfig} (see {@code isClientTelemetryEnabled()}).
 *
 * @param clientTelemetryEnabled flag to enable client telemetry.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
    this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
    return this;
}
/**
 * Sets whether to allow reads to go to multiple regions configured on an account
 * of the Azure Cosmos DB service.
 * <p>
 * DEFAULT value is true.
 * <p>
 * If this property is not set, the default is true for all Consistency Levels
 * other than Bounded Staleness; the default is false for Bounded Staleness when:
 * 1. direct connectivity is used, and
 * 2. the Azure Cosmos DB account has more than one region.
 * (NOTE(review): condition 1 reconstructed from a truncated javadoc tag — confirm
 * against upstream documentation.)
 *
 * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
 * Azure Cosmos DB service.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
    this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
    return this;
}
/**
 * Enables automatic retries for write operations even when the SDK can't
 * guarantee that they are idempotent. This is the default behavior for the entire
 * Cosmos client - the policy can be overridden for individual operations in the
 * request options.
 * <br/>
 * NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
 * Upsert and Delete operations. It can be overridden on a per-request basis in the request options. For patch
 * operations, by default (unless overridden in the request options) retries are always disabled.
 * <br/>
 * - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
 * to create a document that the initial attempt successfully created. When enabling
 * useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
 * <br/>
 * - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when
 * replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
 * initial attempt successfully updated (causing the etag to change). When enabling
 * useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
 * <br/>
 * - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
 * initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
 * handle 404 - Not Found.
 * <br/>
 * - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
 * document has been created by the initial attempt - so logically within the same operation. This will only
 * impact applications who have special casing for 201 vs. 200 for upsert operations.
 * <br/>
 * - Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
 * being executed and the precondition filters being used. Before enabling write retries for patch this needs
 * to be carefully reviewed and tested - which is why retries for patch can only be enabled on request options
 * - any CosmosClient wide configuration will be ignored.
 * <br/>
 * Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
 * @param options the options controlling whether non-idempotent write operations should be retried and whether
 * trackingIds can be used.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
    checkNotNull(options, "Argument 'options' must not be null.");
    // Map the three option combinations onto the internal WriteRetryPolicy enum.
    if (options.isEnabled()) {
        if (options.isTrackingIdUsed()) {
            this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
        } else {
            this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
        }
    } else {
        this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
    }
    return this;
}
/**
 * Gets the effective write retry policy for non-idempotent write operations.
 *
 * @return the current {@link WriteRetryPolicy}
 */
WriteRetryPolicy getNonIdempotentWriteRetryPolicy()
{
    return this.writeRetryPolicy;
}
/**
 * Re-evaluates the system-configuration override for the non-idempotent write
 * retry policy. Recognized values (case-insensitive) are {@code NO_RETRIES},
 * {@code WITH_TRACKING_ID} and {@code WITH_RETRIES}; anything else — including
 * a missing configuration value — resolves to {@link WriteRetryPolicy#DISABLED}.
 */
void resetNonIdempotentWriteRetryPolicy() {
    // Default: disabled. Only a recognized non-disabled policy name changes it.
    WriteRetryPolicy resolved = WriteRetryPolicy.DISABLED;
    String writePolicyName = Configs.getNonIdempotentWriteRetryPolicy();
    if (writePolicyName != null) {
        if ("WITH_TRACKING_ID".equalsIgnoreCase(writePolicyName)) {
            resolved = WriteRetryPolicy.WITH_TRACKING_ID;
        } else if ("WITH_RETRIES".equalsIgnoreCase(writePolicyName)) {
            resolved = WriteRetryPolicy.WITH_RETRIES;
        }
        // "NO_RETRIES" and unknown names both keep the DISABLED default,
        // exactly matching the original explicit branches.
    }
    this.writeRetryPolicy = resolved;
}
/**
 * Re-evaluates the session-capturing-type system configuration. When the
 * configured value is "REGION_SCOPED" (case-insensitive), region-scoped session
 * capturing is enabled; any other non-empty value disables it. An empty or
 * missing configuration leaves the current flag untouched.
 */
void resetSessionCapturingType() {
    String sessionCapturingType = Configs.getSessionCapturingType();
    if (!StringUtils.isEmpty(sessionCapturingType)) {
        if (sessionCapturingType.equalsIgnoreCase("REGION_SCOPED")) {
            logger.info("Session capturing type is set to REGION_SCOPED");
            this.isRegionScopedSessionCapturingEnabled = true;
        } else {
            // Unknown values are logged and treated as "not region scoped".
            logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
            this.isRegionScopedSessionCapturingEnabled = false;
        }
    }
}
/**
 * Sets the {@link CosmosContainerProactiveInitConfig} which enables warming up of caches and connections
 * associated with containers obtained from
 * {@link CosmosContainerProactiveInitConfig#getCosmosContainerIdentities()}; connections are
 * obtained from the first <em>k</em> preferred regions where <em>k</em> evaluates to
 * {@link CosmosContainerProactiveInitConfig#getProactiveConnectionRegionsCount()}.
 *
 * <p>
 * Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate the
 * {@link CosmosContainerProactiveInitConfig} class.
 * </p>
 * @param proactiveContainerInitConfig which encapsulates a list of container identities and the number of
 * proactive connection regions
 * @return current CosmosClientBuilder
 * */
public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    this.proactiveContainerInitConfig = proactiveContainerInitConfig;
    return this;
}
/**
 * Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client.
 *
 * @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig){
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    return this;
}
/**
 * Sets the {@link SessionRetryOptions} instance on the client.
 * <p>
 * This setting helps in optimizing retry behavior associated with
 * {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} ({@code 404 / 1002}) scenarios, which happen
 * when the targeted consistency used by the request is <i>Session Consistency</i> and a
 * request goes to a region that does not have recent enough data.
 * <p>
 * DISCLAIMER: Setting {@link SessionRetryOptions} will modify retry behavior
 * for all operations or workloads executed through this instance of the client.
 * <p>
 * For multi-write accounts:
 * <ul>
 * <li>For a read request going to a local read region, availability can be improved by
 * retrying on a different write region, since that region may have more up-to-date data.</li>
 * <li>For a read or write request going to a local write region, it can help to switch to a
 * different write region right away if the local write region does not have the most
 * up-to-date data.</li>
 * </ul>
 * For single-write accounts:
 * <ul>
 * <li>If a read request goes to a local read region, it helps to switch to the write region quicker.</li>
 * <li>If a read request goes to the write region, this setting does not matter, since the
 * write region of a single-write account has the most up-to-date data.</li>
 * <li>For a write to the write region of a single-write account, {@code READ_SESSION_NOT_AVAILABLE}
 * errors do not apply — the write region always has the most recent version of the data.</li>
 * </ul>
 * About region switch hints (see {@link CosmosRegionSwitchHint}):
 * <ul>
 * <li>To prioritize the local region for retries, use the local-region-preferred hint.</li>
 * <li>To move retries to a different / remote region quicker, use the remote-region-preferred hint.</li>
 * </ul>
 * Operations supported: Read, Query, Create, Replace, Upsert, Delete, Patch, Batch, Bulk.
 *
 * @param sessionRetryOptions The {@link SessionRetryOptions} instance.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
    this.sessionRetryOptions = sessionRetryOptions;
    return this;
}
/**
 * Sets a {@link Supplier} which returns a {@link CosmosExcludedRegions} instance when invoked.
 * Requests will not be routed to the regions present in the supplied {@link CosmosExcludedRegions}
 * for hedging scenarios and retry scenarios for the workload executed through this instance
 * of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
 * @return current CosmosClientBuilder.
 * */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
    this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
    return this;
}
/**
 * Gets the regions to exclude from the list of preferred regions. A request will not be
 * routed to these excluded regions for non-retry and retry scenarios
 * for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @return the set of regions to exclude; an empty set when no supplier is configured
 * or the supplier yields {@code null}.
 * */
Set<String> getExcludedRegions() {
    // Invoke the supplier only once: the original code called get() twice, so a
    // supplier that is expensive or returns a different (possibly null) instance
    // per call could pass the null-check and then NPE — or return stale data.
    if (this.cosmosExcludedRegionsSupplier != null) {
        CosmosExcludedRegions excludedRegions = this.cosmosExcludedRegionsSupplier.get();
        if (excludedRegions != null) {
            return excludedRegions.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
/**
 * Gets the {@link SessionRetryOptions} configured on this builder.
 *
 * @return the session retry options; {@code null} when not configured
 */
SessionRetryOptions getSessionRetryOptions() {
    return this.sessionRetryOptions;
}
/**
 * Gets the {@link CosmosEndToEndOperationLatencyPolicyConfig}.
 *
 * @return the {@link CosmosEndToEndOperationLatencyPolicyConfig}; {@code null} when not configured
 */
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Gets the GATEWAY connection configuration to be used.
 *
 * @return gateway connection config; {@code null} when not configured
 */
GatewayConnectionConfig getGatewayConnectionConfig() {
    return gatewayConnectionConfig;
}
/**
 * Gets the DIRECT connection configuration to be used.
 *
 * @return direct connection config; {@code null} when gateway mode is configured
 */
DirectConnectionConfig getDirectConnectionConfig() {
    return directConnectionConfig;
}
/**
 * Gets the value of the user-agent suffix.
 *
 * @return the value of the user-agent suffix.
 */
String getUserAgentSuffix() {
    return userAgentSuffix;
}
/**
 * Gets the retry policy options associated with the DocumentClient instance.
 *
 * @return the RetryOptions instance.
 */
ThrottlingRetryOptions getThrottlingRetryOptions() {
    return throttlingRetryOptions;
}
/**
 * Gets the preferred regions for geo-replicated database accounts.
 *
 * @return the list of preferred regions; never {@code null} (an empty list when unset).
 * Note: when set, the caller's original list instance is returned, not a copy.
 */
List<String> getPreferredRegions() {
    return preferredRegions != null ? preferredRegions : Collections.emptyList();
}
/**
 * Gets the flag to enable endpoint discovery for geo-replicated database accounts.
 *
 * @return whether endpoint discovery is enabled.
 */
boolean isEndpointDiscoveryEnabled() {
    return endpointDiscoveryEnabled;
}
/**
 * Gets the flag to enable writes on any regions for geo-replicated database accounts
 * in the Azure Cosmos DB service.
 * <p>
 * When true, the SDK will direct write operations to available writable regions
 * of a geo-replicated database account, ordered by the PreferredRegions property.
 * The value has no effect until EnableMultipleWriteRegions in DatabaseAccount
 * is also set to true.
 * <p>
 * DEFAULT value is true.
 *
 * @return flag to enable writes on any regions for geo-replicated database accounts.
 */
boolean isMultipleWriteRegionsEnabled() {
    return multipleWriteRegionsEnabled;
}
/**
 * Gets the effective client-telemetry flag.
 * <p>
 * Resolution precedence: the explicit override set via
 * {@code clientTelemetryEnabled(boolean)} wins; otherwise any value explicitly
 * set on the {@link CosmosClientTelemetryConfig}; otherwise the SDK default.
 *
 * @return flag to enable client telemetry.
 */
boolean isClientTelemetryEnabled() {
    Boolean explicitlySetInConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
    // Builder-level override always takes precedence over the telemetry config.
    if (this.clientTelemetryEnabledOverride != null) {
        return this.clientTelemetryEnabledOverride;
    }
    if (explicitlySetInConfig != null) {
        return explicitlySetInConfig;
    }
    return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
/**
 * Gets whether to allow reads to go to multiple regions configured on an account
 * of the Azure Cosmos DB service.
 * <p>
 * DEFAULT value is true.
 * <p>
 * If this property is not set, the default is true for all Consistency Levels
 * other than Bounded Staleness; the default is false for Bounded Staleness.
 *
 * @return flag to allow reads to go to multiple regions configured on an account of the Azure Cosmos DB service.
 */
boolean isReadRequestsFallbackEnabled() {
    return readRequestsFallbackEnabled;
}
/**
 * Returns the client telemetry config instance for this builder.
 *
 * @return the client telemetry config instance for this builder
 */
CosmosClientTelemetryConfig getClientTelemetryConfig() {
    return this.clientTelemetryConfig;
}
/**
 * Sets the client telemetry config instance for this builder.
 * If the supplied config explicitly states whether telemetry should be sent to
 * the service, any previously set builder-level override is cleared so the
 * config's value takes effect.
 *
 * @param telemetryConfig the client telemetry configuration to be used; must not be {@code null}
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
    ifThrowIllegalArgException(telemetryConfig == null,
        "Parameter 'telemetryConfig' must not be null.");
    Boolean explicitValueFromConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(telemetryConfig);
    // An explicit setting in the config wins over the builder-level override.
    if (explicitValueFromConfig != null) {
        this.clientTelemetryEnabledOverride = null;
    }
    this.clientTelemetryConfig = telemetryConfig;
    return this;
}
/**
 * Sets a custom serializer that should be used for conversion between POJOs and the Json payload stored in the
 * Cosmos DB service. The custom serializer can also be specified in request options. If defined both here and
 * in request options, the serializer defined in request options will be used.
 *
 * @param customItemSerializer the custom serializer to be used for item payload transformations
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
    this.defaultCustomSerializer = customItemSerializer;
    return this;
}
/**
 * Gets the custom item serializer configured on this builder.
 *
 * @return the custom serializer; {@code null} when not configured
 */
CosmosItemSerializer getCustomItemSerializer() {
    return this.defaultCustomSerializer;
}
/**
 * Builds a cosmos async client with the provided properties.
 * Startup information is logged.
 *
 * @return CosmosAsyncClient
 */
public CosmosAsyncClient buildAsyncClient() {
    return buildAsyncClient(true);
}
/**
 * Builds a cosmos async client with the provided properties.
 *
 * @param logStartupInfo whether to log the startup configuration summary
 * @return CosmosAsyncClient
 */
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Resolve config-driven settings and validate before constructing the client.
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        // Warm up caches/connections for the configured containers, optionally
        // bounded by an aggressive warmup duration.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // No proactive init configured: record completion with an empty container list.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
/**
 * Builds a cosmos sync client with the provided properties.
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Resolve config-driven settings and validate before constructing the client.
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    }
    // NOTE(review): unlike buildAsyncClient(boolean), no completion record is
    // written here when proactiveContainerInitConfig is null — confirm whether
    // this asymmetry is intentional.
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
/**
 * Materializes the {@link ConnectionPolicy} from the builder's direct/gateway
 * configuration and copies the remaining routing/retry settings onto it.
 * Direct config takes precedence; a default gateway config is supplied for the
 * metadata operations that still go through the gateway in direct mode.
 * <p>
 * NOTE(review): if both directConnectionConfig and gatewayConnectionConfig are
 * null, neither branch assigns {@code this.connectionPolicy}; presumably the
 * field is initialized elsewhere (the builder defaults to direct mode) — confirm.
 *
 * @return the fully populated connection policy
 */
ConnectionPolicy buildConnectionPolicy() {
    if (this.directConnectionConfig != null) {
        // Even in direct mode a gateway config is needed for metadata operations.
        if (this.gatewayConnectionConfig == null) {
            this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        }
        this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
    } else if (gatewayConnectionConfig != null) {
        this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
    }
    this.connectionPolicy.setPreferredRegions(this.preferredRegions);
    this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
    this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
    this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
    this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
    this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
    this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    return this.connectionPolicy;
}
/**
 * Validates the builder configuration before a client is constructed:
 * endpoint presence and syntax, preferred-region names, proactive-init
 * constraints, and that exactly at least one credential source is configured.
 *
 * @throws IllegalArgumentException when any configuration constraint is violated
 */
private void validateConfig() {
    // Check the endpoint for null BEFORE parsing it; previously a null endpoint
    // reached `new URI(null)` and surfaced as a NullPointerException instead of
    // the intended IllegalArgumentException below.
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Each preferred region must be non-blank and resolvable to a regional endpoint.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // At least one authentication source must be present.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
/**
 * Gets the {@link Configs} instance used by this builder.
 *
 * @return the configs instance
 */
Configs configs() {
    return configs;
}
/**
 * Sets the {@link Configs} instance to be used by this builder.
 *
 * @param configs the configs instance
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder configs(Configs configs) {
    this.configs = configs;
    return this;
}
/**
 * Throws an {@link IllegalArgumentException} with the given message when the
 * condition is true; otherwise does nothing.
 *
 * @param value the condition to check
 * @param error the exception message used when the condition holds
 */
private void ifThrowIllegalArgException(boolean value, String error) {
    if (value) {
        throw new IllegalArgumentException(error);
    }
}
/**
 * Stops the startup stopwatch and logs (at WARN, so it appears under default
 * log configurations) a one-line summary of the client's effective configuration.
 *
 * @param stopwatch the stopwatch started at the beginning of client construction
 * @param client the newly constructed async client whose configuration is summarized
 */
private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
    stopwatch.stop();
    if (logger.isWarnEnabled()) {
        long time = stopwatch.getTime();
        String diagnosticsCfg = "";
        String tracingCfg = "";
        if (client.getClientTelemetryConfig() != null) {
            diagnosticsCfg = client.getClientTelemetryConfig().toString();
        }
        DiagnosticsProvider provider = client.getDiagnosticsProvider();
        if (provider != null) {
            tracingCfg = provider.getTraceConfigLog();
        }
        logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
                "configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
                "consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
                "connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
                "diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
            client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
            getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
            isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
            isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
            tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
            io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
    }
}
/**
 * Registers the {@link CosmosClientBuilderHelper} accessor bridge so that
 * internal SDK packages can reach package-private builder state without
 * widening this class's public API.
 */
static void initialize() {
    CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
        new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
            @Override
            public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
                                                              CosmosClientMetadataCachesSnapshot metadataCache) {
                builder.metadataCaches(metadataCache);
            }
            @Override
            public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
                return builder.metadataCaches();
            }
            @Override
            public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
                builder.setApiType(apiType);
            }
            @Override
            public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
                return builder.apiType();
            }
            @Override
            public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
                return builder.getConnectionPolicy();
            }
            @Override
            public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
                return builder.buildConnectionPolicy();
            }
            @Override
            public Configs getConfigs(CosmosClientBuilder builder) {
                return builder.configs();
            }
            @Override
            public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
                return builder.getConsistencyLevel();
            }
            @Override
            public String getEndpoint(CosmosClientBuilder builder) {
                return builder.getEndpoint();
            }
            @Override
            public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
                return builder.getCustomItemSerializer();
            }
            @Override
            public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
                builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
            }
            @Override
            public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
                return builder.isRegionScopedSessionCapturingEnabled();
            }
        });
}
static { initialize(); }
} |
`container` assignment can be in-lined (no need of `asyncContainer` assignment)? | public void before_OperationPoliciesTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client);
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
} | container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); | public void before_OperationPoliciesTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
container = getSharedMultiPartitionCosmosContainer(this.client);
} | class OperationPoliciesTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosAsyncContainer container;
private static final ImplementationBridgeHelpers.CosmosAsyncContainerHelper.CosmosAsyncContainerAccessor containerAccessor
= ImplementationBridgeHelpers.CosmosAsyncContainerHelper.getCosmosAsyncContainerAccessor();
private static final Properties prop = new Properties();
private static final String E2E_TIMEOUT = "timeout.seconds";
private static final String CONSISTENCY_LEVEL = "consistency.level";
private static final String CONTENT_RESPONSE_ON_WRITE = "contentResponseOnWriteEnabled";
private static final String NON_IDEMPOTENT_WRITE_RETRIES = "nonIdempotentWriteRetriesEnabled";
private static final String BYPASS_CACHE = "dedicatedGatewayOptions.bypassCache";
private static final String THROUGHPUT_CONTROL_GROUP_NAME = "throughputControlGroupName";
private static final String REQUEST_CHARGE_THRESHOLD = "diagnosticThresholds.requestChargeThreshold";
private static final String SCAN_IN_QUERY = "scanInQueryEnabled";
private static final String EXCLUDE_REGIONS = "excludeRegions";
private static final String MAX_DEGREE_OF_PARALLELISM = "maxDegreeOfParallelism";
private static final String MAX_BUFFERED_ITEM_COUNT = "maxBufferedItemCount";
private static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_KB = "responseContinuationTokenLimitKb";
private static final String MAX_ITEM_COUNT = "maxItemCount";
private static final String QUERY_METRICS = "queryMetricsEnabled";
private static final String INDEX_METRICS = "indexMetricsEnabled";
private static final String MAX_PREFETCH_PAGE_COUNT = "maxPrefetchPageCount";
private static final String QUERY_NAME = "queryName";
private static final String[] optionLabels = {E2E_TIMEOUT, CONSISTENCY_LEVEL, CONTENT_RESPONSE_ON_WRITE, NON_IDEMPOTENT_WRITE_RETRIES, BYPASS_CACHE, THROUGHPUT_CONTROL_GROUP_NAME, REQUEST_CHARGE_THRESHOLD, SCAN_IN_QUERY, EXCLUDE_REGIONS, MAX_DEGREE_OF_PARALLELISM, MAX_BUFFERED_ITEM_COUNT, RESPONSE_CONTINUATION_TOKEN_LIMIT_KB, MAX_ITEM_COUNT, QUERY_METRICS, INDEX_METRICS, MAX_PREFETCH_PAGE_COUNT, QUERY_NAME};
private static final String[] initialOptions = {"20", ConsistencyLevel.STRONG.toString().toUpperCase(), "true", "false", "false", "default", "2000", "false", "East US 2", "2", "100", "200", "30", "true", "true", "10", "QueryName"};
@Factory(dataProvider = "clientBuildersWithApplyPolicies")
public OperationPoliciesTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
for (int i = 0; i < optionLabels.length; i++) {
prop.setProperty(optionLabels[i], initialOptions[i]);
}
}
// Populates cosmosRequestOptions from the shared property table for point
// operations (Create/Read/Replace/Delete/Patch/Upsert) and transactional
// batches; any other operation type leaves the options untouched.
private static void createReadDeleteBatchEtcOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
// Batch is only customized here when the span identifies a transactional batch;
// non-transactional (bulk) batches are handled by createBulkOptions.
if (operationType.equals("Create") || operationType.equals("Read") || operationType.equals("Replace")
|| operationType.equals("Delete") || operationType.equals("Patch") || operationType.equals("Upsert")
|| (operationType.equals("Batch") && spanName.contains("transactionalBatch"))) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)))
.setContentResponseOnWriteEnabled(Boolean.parseBoolean(prop.getProperty(CONTENT_RESPONSE_ON_WRITE)))
.setNonIdempotentWriteRetriesEnabled(Boolean.parseBoolean(prop.getProperty(NON_IDEMPOTENT_WRITE_RETRIES)))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
// Comma-separated list in the property becomes the excluded-regions list.
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
}
}
// Populates cosmosRequestOptions from the shared property table for query and
// readAllItems operations, including the query-specific knobs (parallelism,
// buffering, continuation-token limit, metrics, query name).
private static void createQueryReadAllItemsOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Query") || spanName.contains("readAllItems")) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setScanInQueryEnabled(Boolean.parseBoolean(prop.getProperty(SCAN_IN_QUERY)))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
.setMaxDegreeOfParallelism(Integer.parseInt(prop.getProperty(MAX_DEGREE_OF_PARALLELISM)))
.setMaxBufferedItemCount(Integer.parseInt(prop.getProperty(MAX_BUFFERED_ITEM_COUNT)))
.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)))
.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
.setQueryName(prop.getProperty(QUERY_NAME))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)));
}
}
// Populates cosmosRequestOptions from the shared property table for readMany
// operations (identified by span name only — readMany has no dedicated
// operation type).
private static void createReadManyOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (spanName.contains("readMany")) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)));
}
}
// Applies the subset of options relevant to bulk (non-transactional batch)
// operations: excluded regions and the throughput-control group name.
private static void createBulkOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    boolean isBulkOperation = operationType.equals("Batch") && spanName.contains("nonTransactionalBatch");
    if (!isBulkOperation) {
        return;
    }
    ArrayList<String> excludedRegions = new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")));
    cosmosRequestOptions
        .setExcludeRegions(excludedRegions)
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
}
// Applies the subset of options relevant to change-feed operations
// (identified by span name): regions, throughput group, thresholds and paging.
private static void createChangeFeedOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!spanName.contains("queryChangeFeed")) {
        return;
    }
    ArrayList<String> excludedRegions = new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")));
    CosmosDiagnosticsThresholds thresholds = new CosmosDiagnosticsThresholds()
        .setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD)));
    cosmosRequestOptions
        .setExcludeRegions(excludedRegions)
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
        .setThresholds(thresholds)
        .setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
        .setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
}
@DataProvider
// Supplies three client builders: gateway mode + combined policy, direct mode +
// combined policy, and direct mode with the same customizations split across
// two chained policies. The lambda bodies were previously duplicated verbatim
// for clientBuilders[2]; they are now named and reused (behavior unchanged).
public static CosmosClientBuilder[] clientBuildersWithApplyPolicies() {
    // Policy covering point operations, queries/readAllItems and readMany.
    CosmosOperationPolicy pointAndQueryPolicy = (cosmosOperationDetails) -> {
        CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
        String operationType = cosmosDiagnosticsContext.getOperationType();
        String spanName = cosmosDiagnosticsContext.getSpanName();
        CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
        createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
        createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
        createReadManyOptions(spanName, cosmosRequestOptions);
        cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
    };
    // Policy covering bulk and change-feed operations.
    CosmosOperationPolicy bulkAndChangeFeedPolicy = (cosmosOperationDetails) -> {
        CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
        String operationType = cosmosDiagnosticsContext.getOperationType();
        String spanName = cosmosDiagnosticsContext.getSpanName();
        CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
        createBulkOptions(operationType, spanName, cosmosRequestOptions);
        createChangeFeedOptions(spanName, cosmosRequestOptions);
        cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
    };
    // Single policy applying every customization at once.
    CosmosOperationPolicy combinedPolicy = (cosmosOperationDetails) -> {
        CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
        String operationType = cosmosDiagnosticsContext.getOperationType();
        String spanName = cosmosDiagnosticsContext.getSpanName();
        CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
        createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
        createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
        createReadManyOptions(spanName, cosmosRequestOptions);
        createBulkOptions(operationType, spanName, cosmosRequestOptions);
        createChangeFeedOptions(spanName, cosmosRequestOptions);
        cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
    };
    CosmosClientBuilder[] clientBuilders = new CosmosClientBuilder[3];
    clientBuilders[0] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .gatewayMode()
        .addOperationPolicy(combinedPolicy);
    clientBuilders[1] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .addOperationPolicy(combinedPolicy);
    // Same customizations as the combined policy, but split across two
    // policies to exercise policy chaining.
    clientBuilders[2] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .addOperationPolicy(pointAndQueryPolicy)
        .addOperationPolicy(bulkAndChangeFeedPolicy);
    return clientBuilders;
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT)
@AfterClass(groups = {"fast"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
// Closes the async client created during setup; runs even if tests failed.
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@AfterMethod()
// Restores the shared option table to the baseline values after each test so
// the policy lambdas start from a known configuration.
public void afterMethod() {
changeProperties(initialOptions);
}
@DataProvider(name = "changedOptions")
// Supplies three option vectors per test: two changed variants followed by the
// baseline values (verifying the options can be reverted).
private String[][] createChangedOptions() {
    // Variant 1: SESSION consistency, content response on write still enabled.
    String[] sessionVariant = { "8", ConsistencyLevel.SESSION.toString().toUpperCase(), "true", "false", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "false", "20", "QueryNameChanged" };
    // Variant 2: EVENTUAL consistency, content response on write disabled.
    String[] eventualVariant = { "4", ConsistencyLevel.EVENTUAL.toString().toUpperCase(), "false", "true", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "false", "20", "QueryNameChanged" };
    return new String[][] { sessionVariant, eventualVariant, initialOptions };
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Creates an item under baseline options, then again after changing the option
// properties, verifying the operation policy applied the new values.
public void createItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, itemResponse);
validateOptions(initialOptions, itemResponse);
changeProperties(changedOptions);
item = getDocumentDefinition(UUID.randomUUID().toString());
itemResponse = container.createItem(item).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
// Index 2 of the option vector is contentResponseOnWriteEnabled: when false,
// the create response is expected to carry no item payload.
if (changedOptions[2].equals("true")) {
validateItemResponse(item, itemResponse);
} else {
assertThat(BridgeInternal.getProperties(itemResponse)).isNull();
}
validateOptions(changedOptions, itemResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Deletes an item under baseline options, then re-creates and deletes it after
// changing the option properties, verifying the policy applied the new values.
public void deleteItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(item).block();
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(item.getId(),
new PartitionKey(item.get("mypk")),
options).block();
// 204 No Content is the expected status for a successful delete.
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
validateOptions(initialOptions, deleteResponse);
changeProperties(changedOptions);
container.createItem(item).block();
deleteResponse = container.deleteItem(item.getId(),
new PartitionKey(item.get("mypk")),
options).block();
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
validateOptions(changedOptions, deleteResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Reads an item under baseline options, then again after changing the option
// properties, verifying the policy applied the new values on the read path.
public void readItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(item).block();
CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(item.getId(),
new PartitionKey(item.get("mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class).block();
validateItemResponse(item, readResponse);
validateOptions(initialOptions, readResponse);
changeProperties(changedOptions);
readResponse = container.readItem(item.getId(),
new PartitionKey(item.get("mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class).block();
validateItemResponse(item, readResponse);
validateOptions(changedOptions, readResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Upserts an item under baseline options, then upserts a modified version
// after changing the option properties, verifying the policy took effect.
public void upsertItem(String[] changedOptions) throws Throwable {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, upsertResponse);
validateOptions(initialOptions, upsertResponse);
changeProperties(changedOptions);
// Second upsert adds a new property so the replace path is exercised.
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
// Index 2 is contentResponseOnWriteEnabled: when false, no payload returns.
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(upsertResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(upsertResponse)).isNull();
}
validateOptions(changedOptions, upsertResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Patches an item under baseline options, then applies a second patch after
// changing the option properties, verifying the policy took effect.
public void patchItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(item, new CosmosItemRequestOptions()).block();
validateItemResponse(item, createResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
PartitionKey partitionKey = new PartitionKey(item.get("mypk"));
CosmosPatchOperations patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
CosmosItemResponse<InternalObjectNode> patchResponse = container.patchItem(
item.getId(), partitionKey, patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, patchResponse);
changeProperties(changedOptions);
// Second patch sets the same path to a new value; "add" replaces if present.
newPropValue = UUID.randomUUID().toString();
patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
patchResponse = container.patchItem(item.getId(), partitionKey,
patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
// Index 2 is contentResponseOnWriteEnabled: when false, no payload returns.
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(patchResponse)).isNull();
}
validateOptions(changedOptions, patchResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Replaces an item under baseline options, then replaces it again after
// changing the option properties, verifying the policy took effect.
public void replaceItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
validateItemResponse(item, itemResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
PartitionKey pk = new PartitionKey(item.get("mypk"));
ModelBridgeInternal.setPartitionKey(options, pk);
CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, replace);
changeProperties(changedOptions);
newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
// Index 2 is contentResponseOnWriteEnabled: when false, no payload returns.
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(replace)).isNull();
}
validateOptions(changedOptions, replace);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Executes a 10-item bulk create under baseline options, then again after
// changing the option properties, verifying the policy applied to each
// per-item response. The cold Flux generates fresh items on re-subscription,
// so the second execution inserts 10 new documents.
public void bulk(String[] changedOptions) {
Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.range(0, 10).map(i -> {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
return CosmosBulkOperations.getCreateItemOperation(item, new PartitionKey(item.get("mypk")));
});
CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
Flux<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> responseFlux = container
.executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
AtomicInteger processedDoc = new AtomicInteger(0);
responseFlux
.flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
processedDoc.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
if (cosmosBulkOperationResponse.getException() != null) {
logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
fail(cosmosBulkOperationResponse.getException().toString());
}
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
validateOptions(initialOptions, cosmosBulkItemResponse);
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(10);
changeProperties(changedOptions);
// Second execution: same assertions, but options must reflect changedOptions.
responseFlux = container
.executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
AtomicInteger processedDoc2 = new AtomicInteger(0);
responseFlux
.flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
processedDoc2.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
if (cosmosBulkOperationResponse.getException() != null) {
logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
fail(cosmosBulkOperationResponse.getException().toString());
}
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
validateOptions(changedOptions, cosmosBulkItemResponse);
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc2.get()).isEqualTo(10);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Executes a two-item transactional batch (items share a partition key) under
// baseline options, then again after changing the option properties.
public void batch(String[] changedOptions) {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
InternalObjectNode item2 = getDocumentDefinition(UUID.randomUUID().toString());
// Both items must share the partition key for a transactional batch.
item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
batch.createItemOperation(item);
batch.createItemOperation(item2);
CosmosBatchResponse batchResponse = container.executeCosmosBatch(batch).block();
assertThat(batchResponse).isNotNull();
assertThat(batchResponse.getStatusCode())
.as("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}",
batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
.isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.size()).isEqualTo(2);
assertThat(batchResponse.getRequestCharge()).isPositive();
assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
validateOptions(initialOptions, batchResponse);
changeProperties(changedOptions);
// Second batch with fresh items; options must now reflect changedOptions.
item = getDocumentDefinition(UUID.randomUUID().toString());
item2 = getDocumentDefinition(UUID.randomUUID().toString());
item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
batch.createItemOperation(item);
batch.createItemOperation(item2);
batchResponse = container.executeCosmosBatch(batch).block();
assertThat(batchResponse).isNotNull();
assertThat(batchResponse.getStatusCode())
.as("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}",
batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
.isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.size()).isEqualTo(2);
assertThat(batchResponse.getRequestCharge()).isPositive();
assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
validateOptions(changedOptions, batchResponse);
}
// Runs the same single-item query before and after the option properties are
// changed, verifying the operation policy picks up the new values.
// NOTE(review): the timeOut attribute was commented out (apparent debugging
// leftover); restored here for consistency with every sibling test.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void query(String[] changedOptions) {
    String id = UUID.randomUUID().toString();
    container.createItem(getDocumentDefinition(id)).block();
    String query = String.format("SELECT * from c where c.id = '%s'", id);
    // First pass: options must reflect the baseline values.
    container.queryItems(query, InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isEqualTo(1);
            assertThat(results.get(0).getId()).isEqualTo(id);
            validateOptions(initialOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
    changeProperties(changedOptions);
    // Second pass: options must reflect the changed values.
    container.queryItems(query, InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isEqualTo(1);
            assertThat(results.get(0).getId()).isEqualTo(id);
            validateOptions(changedOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Reads all items before and after the option properties are changed,
// verifying the operation policy picks up the new values on the feed path.
public void readAllItems(String[] changedOptions) throws Exception {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
container.readAllItems(InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
// The shared container may hold documents from earlier tests, so only a
// lower bound on the result count is asserted.
assertThat(results.size()).isGreaterThanOrEqualTo(1);
validateOptions(initialOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
changeProperties(changedOptions);
container.readAllItems(InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isGreaterThanOrEqualTo(1);
validateOptions(changedOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Reads five documents via readMany before and after the option properties
// are changed, verifying the operation policy picks up the new values.
public void readMany(String[] changedOptions) throws Exception {
List<CosmosItemIdentity> cosmosItemIdentities = new ArrayList<>();
Set<String> idSet = new HashSet<>();
int numDocuments = 5;
for (int i = 0; i < numDocuments; i++) {
InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(document).block();
PartitionKey partitionKey = new PartitionKey(document.get("mypk"));
CosmosItemIdentity cosmosItemIdentity = new CosmosItemIdentity(partitionKey, document.getId());
cosmosItemIdentities.add(cosmosItemIdentity);
idSet.add(document.getId());
}
FeedResponse<InternalObjectNode> feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
assertThat(feedResponse).isNotNull();
assertThat(feedResponse.getResults()).isNotNull();
assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
// Every returned id must be one of the ids that were written.
for (int i = 0; i < feedResponse.getResults().size(); i++) {
InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
assertThat(idSet.contains(fetchedResult.getId())).isTrue();
}
validateOptions(initialOptions, feedResponse, false, true);
changeProperties(changedOptions);
feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
assertThat(feedResponse).isNotNull();
assertThat(feedResponse.getResults()).isNotNull();
assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
for (int i = 0; i < feedResponse.getResults().size(); i++) {
InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
assertThat(idSet.contains(fetchedResult.getId())).isTrue();
}
validateOptions(changedOptions, feedResponse, false, true);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Drains the change feed from the beginning under baseline options, then
// inserts more documents and resumes from the saved continuation token under
// changed options, verifying the policy applied and the new batch is complete.
public void queryChangeFeed(String[] changedOptions) {
int numInserted = 20;
for (int i = 0; i < numInserted; i++) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
}
CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions
.createForProcessingFromBeginning(FeedRange.forFullRange());
Iterator<FeedResponse<InternalObjectNode>> responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
.toIterable().iterator();
// Keep the last continuation token so the second pass resumes where the
// first drain ended.
String continuationToken = "";
while (responseIterator.hasNext()) {
FeedResponse<InternalObjectNode> response = responseIterator.next();
assertThat(response.getRequestCharge()).isGreaterThan(0);
continuationToken = response.getContinuationToken();
validateOptions(initialOptions, response, true, false);
}
changeProperties(changedOptions);
for (int i = 0; i < numInserted; i++) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
}
options = CosmosChangeFeedRequestOptions
.createForProcessingFromContinuation(continuationToken);
responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
.toIterable().iterator();
int totalResults = 0;
while (responseIterator.hasNext()) {
FeedResponse<InternalObjectNode> response = responseIterator.next();
assertThat(response.getRequestCharge()).isGreaterThan(0);
totalResults += response.getResults().size();
validateOptions(changedOptions, response, true, false);
}
// Only the documents inserted after the first drain should be observed.
assertThat(totalResults).isEqualTo(numInserted);
}
// Builds a minimal test document with the given id, a random partition-key
// value in "mypk", and a fixed "sgmts" array payload.
private InternalObjectNode getDocumentDefinition(String documentId) {
    final String partitionKeyValue = UUID.randomUUID().toString();
    final String json = String.format(
        "{ \"id\": \"%s\", \"mypk\": \"%s\", \"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}",
        documentId, partitionKeyValue);
    return new InternalObjectNode(json);
}
// Asserts that the response carries a document whose id matches the one that
// was written.
private void validateItemResponse(InternalObjectNode containerProperties,
                                  CosmosItemResponse<InternalObjectNode> response) {
    String returnedId = BridgeInternal.getProperties(response).getId();
    assertThat(returnedId).isNotNull();
    assertThat(returnedId)
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}
// Verifies that the request options recorded in the response diagnostics match
// the expected option vector (indices follow the optionLabels array:
// 0=e2e timeout, 1=consistency, 2=content response, 3=write retries,
// 4=bypass cache, 5=throughput group, 6=RU threshold, 8=excluded regions).
private void validateOptions(String[] options, CosmosItemResponse<?> response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(options[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(options[1]);
assertThat(requestOptions.isContentResponseOnWriteEnabled()).isEqualTo(Boolean.parseBoolean(options[2]));
assertThat(requestOptions.getNonIdempotentWriteRetriesEnabled()).isEqualTo(Boolean.parseBoolean(options[3]));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(options[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
// Batch overload: only consistency level, RU threshold and excluded regions
// are applied to transactional batches, so only those are verified.
private void validateOptions(String[] options, CosmosBatchResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(options[1]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
// Bulk overload: only excluded regions and the throughput-control group name
// are applied to bulk operations, so only those are verified.
private void validateOptions(String[] options, CosmosBulkItemResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
}
/**
 * Verifies the effective request options captured in a feed response's diagnostics
 * context. The subset of options asserted depends on the operation flavor
 * (change feed, readMany, or plain query/readAllItems). Indexes into
 * {@code changedOptions} follow the {@code optionLabels} ordering of this class.
 *
 * Fix: the consistency-level assertions now uppercase BOTH sides with Locale.ROOT,
 * matching the CosmosItemResponse overload of this method — a one-sided
 * {@code toUpperCase()} fails for mixed-case provider values such as "Session"
 * or "ConsistentPrefix".
 */
private void validateOptions(String[] changedOptions, FeedResponse<InternalObjectNode> response, boolean isChangeFeed, boolean isReadMany) {
    OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
        response.getCosmosDiagnostics().getDiagnosticsContext());
    if (isChangeFeed) {
        // Change feed only honors throughput control group, diagnostics threshold,
        // excluded regions, max item count and max prefetch page count.
        assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
        assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
        assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
        assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
        assertThat(requestOptions.getMaxPrefetchPageCount()).isEqualTo(Integer.parseInt(changedOptions[15]));
    } else if (isReadMany) {
        // readMany supports point-read style overrides plus paging/metrics knobs,
        // but not scan-in-query, parallelism, buffered item count or query name.
        assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
            .isEqualTo(Long.parseLong(changedOptions[0]));
        assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
        assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
        assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
        assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
        assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
        assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
        assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
        assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
        assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
    } else {
        // Plain query / readAllItems honors the full query override surface.
        assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
            .isEqualTo(Long.parseLong(changedOptions[0]));
        assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
        assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
        assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
        assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
        assertThat(requestOptions.isScanInQueryEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[7]));
        assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
        assertThat(requestOptions.getMaxDegreeOfParallelism()).isEqualTo(Integer.parseInt(changedOptions[9]));
        assertThat(requestOptions.getMaxBufferedItemCount()).isEqualTo(Integer.parseInt(changedOptions[10]));
        assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
        assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
        assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
        assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
        assertThat(requestOptions.getQueryNameOrDefault("")).isEqualTo(changedOptions[16]);
    }
}
/**
 * Overwrites the tracked option properties with the supplied values;
 * values[i] corresponds to optionLabels[i].
 */
private void changeProperties(String[] values) {
    int index = 0;
    for (String value : values) {
        prop.setProperty(optionLabels[index], value);
        index++;
    }
}
} | class OperationPoliciesTest extends TestSuiteBase {
// Client/container used by every test; populated during class setup.
private CosmosAsyncClient client;
private CosmosAsyncContainer container;
// Mutable property bag that the operation policies read on every request;
// tests mutate it via changeProperties(...) to simulate live config changes.
private static final Properties prop = new Properties();
// Property keys — one per overridable request option.
private static final String E2E_TIMEOUT = "timeout.seconds";
private static final String CONSISTENCY_LEVEL = "consistency.level";
private static final String CONTENT_RESPONSE_ON_WRITE = "contentResponseOnWriteEnabled";
private static final String NON_IDEMPOTENT_WRITE_RETRIES = "nonIdempotentWriteRetriesEnabled";
private static final String BYPASS_CACHE = "dedicatedGatewayOptions.bypassCache";
private static final String THROUGHPUT_CONTROL_GROUP_NAME = "throughputControlGroupName";
private static final String REQUEST_CHARGE_THRESHOLD = "diagnosticThresholds.requestChargeThreshold";
private static final String SCAN_IN_QUERY = "scanInQueryEnabled";
private static final String EXCLUDE_REGIONS = "excludeRegions";
private static final String MAX_DEGREE_OF_PARALLELISM = "maxDegreeOfParallelism";
private static final String MAX_BUFFERED_ITEM_COUNT = "maxBufferedItemCount";
private static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_KB = "responseContinuationTokenLimitKb";
private static final String MAX_ITEM_COUNT = "maxItemCount";
private static final String QUERY_METRICS = "queryMetricsEnabled";
private static final String INDEX_METRICS = "indexMetricsEnabled";
private static final String MAX_PREFETCH_PAGE_COUNT = "maxPrefetchPageCount";
private static final String QUERY_NAME = "queryName";
// Ordered key list; option value arrays throughout this class are indexed in
// exactly this order (index 0 = E2E timeout, 1 = consistency level, ...).
private static final String[] optionLabels = {E2E_TIMEOUT, CONSISTENCY_LEVEL, CONTENT_RESPONSE_ON_WRITE, NON_IDEMPOTENT_WRITE_RETRIES, BYPASS_CACHE, THROUGHPUT_CONTROL_GROUP_NAME, REQUEST_CHARGE_THRESHOLD, SCAN_IN_QUERY, EXCLUDE_REGIONS, MAX_DEGREE_OF_PARALLELISM, MAX_BUFFERED_ITEM_COUNT, RESPONSE_CONTINUATION_TOKEN_LIMIT_KB, MAX_ITEM_COUNT, QUERY_METRICS, INDEX_METRICS, MAX_PREFETCH_PAGE_COUNT, QUERY_NAME};
// Baseline values restored after every test method (see afterMethod).
private static final String[] initialOptions = {"20", "Session", "true", "false", "false", "default", "2000", "false", "East US 2", "2", "100", "200", "30", "false", "false", "10", "QueryName"};
@Factory(dataProvider = "clientBuildersWithApplyPolicies")
public OperationPoliciesTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
for (int i = 0; i < optionLabels.length; i++) {
prop.setProperty(optionLabels[i], initialOptions[i]);
}
}
/**
 * Populates the full point-operation override set (E2E timeout, thresholds,
 * consistency, content response, write retries, dedicated gateway, throughput
 * control group, excluded regions) when the operation is a point operation or a
 * transactional batch. All values are read live from the shared property bag.
 */
private static void createReadDeleteBatchEtcOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    boolean isPointOperation = operationType.equals("Create")
        || operationType.equals("Read")
        || operationType.equals("Replace")
        || operationType.equals("Delete")
        || operationType.equals("Patch")
        || operationType.equals("Upsert");
    boolean isTransactionalBatch = operationType.equals("Batch") && spanName.contains("transactionalBatch");
    if (!isPointOperation && !isTransactionalBatch) {
        return;
    }
    CosmosEndToEndOperationLatencyPolicyConfig e2ePolicy = new CosmosEndToEndOperationLatencyPolicyConfig(true,
        Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
        new ThresholdBasedAvailabilityStrategy());
    cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(e2ePolicy)
        .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
        .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)))
        .setContentResponseOnWriteEnabled(Boolean.parseBoolean(prop.getProperty(CONTENT_RESPONSE_ON_WRITE)))
        .setNonIdempotentWriteRetriesEnabled(Boolean.parseBoolean(prop.getProperty(NON_IDEMPOTENT_WRITE_RETRIES)))
        .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
            .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
        .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
}
// Populates the full query override surface (E2E timeout, thresholds, throughput
// control, gateway cache bypass, scan-in-query, excluded regions, parallelism,
// buffering, continuation token limit, page size, metrics, prefetch, query name,
// consistency) for Query operations and readAllItems spans. Values are read live
// from the shared property bag so mid-test property changes take effect.
private static void createQueryReadAllItemsOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (operationType.equals("Query") || spanName.contains("readAllItems")) {
        cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
            Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
            new ThresholdBasedAvailabilityStrategy()))
            .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
            .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
            .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
                .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
            .setScanInQueryEnabled(Boolean.parseBoolean(prop.getProperty(SCAN_IN_QUERY)))
            .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
            .setMaxDegreeOfParallelism(Integer.parseInt(prop.getProperty(MAX_DEGREE_OF_PARALLELISM)))
            .setMaxBufferedItemCount(Integer.parseInt(prop.getProperty(MAX_BUFFERED_ITEM_COUNT)))
            .setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
            .setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)))
            .setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
            .setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
            .setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
            .setQueryName(prop.getProperty(QUERY_NAME))
            .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
    }
}
// Populates the overrides honored by readMany spans: E2E timeout, thresholds,
// throughput control, gateway cache bypass, excluded regions, continuation token
// limit, metrics, and consistency — intentionally excludes query-only knobs such
// as scan-in-query, parallelism and buffered item count.
private static void createReadManyOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (spanName.contains("readMany")) {
        cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
            Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
            new ThresholdBasedAvailabilityStrategy()))
            .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
            .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
            .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
                .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
            .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
            .setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
            .setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
            .setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
            .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
    }
}
/**
 * Populates the overrides honored by bulk execution — excluded regions and the
 * throughput control group. Bulk surfaces as a "Batch" operation type whose span
 * name marks it as non-transactional.
 */
private static void createBulkOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    boolean isBulk = operationType.equals("Batch") && spanName.contains("nonTransactionalBatch");
    if (isBulk) {
        List<String> excludedRegions = new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")));
        cosmosRequestOptions
            .setExcludeRegions(excludedRegions)
            .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
    }
}
/**
 * Populates the overrides honored by change-feed spans: excluded regions,
 * throughput control group, diagnostics threshold, prefetch page count and
 * max item count. Values are read live from the shared property bag.
 */
private static void createChangeFeedOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!spanName.contains("queryChangeFeed")) {
        return;
    }
    List<String> excludedRegions = new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")));
    cosmosRequestOptions
        .setExcludeRegions(excludedRegions)
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
        .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
        .setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
        .setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
}
@DataProvider
// Produces three client configurations exercised by the @Factory constructor:
// [0] gateway mode with one policy covering every operation flavor,
// [1] default (direct) mode with the same single policy,
// [2] default mode with the same logic split across two chained policies.
public static Object[] clientBuildersWithApplyPolicies() {
    // Single policy that dispatches on the diagnostics context's operation type /
    // span name and installs the matching option overrides.
    CosmosOperationPolicy policy = (cosmosOperationDetails) -> {
        CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
        String operationType = cosmosDiagnosticsContext.getOperationType();
        String spanName = cosmosDiagnosticsContext.getSpanName();
        CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
        createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
        createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
        createReadManyOptions(spanName, cosmosRequestOptions);
        createBulkOptions(operationType, spanName, cosmosRequestOptions);
        createChangeFeedOptions(spanName, cosmosRequestOptions);
        cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
    };
    CosmosClientBuilder[] clientBuilders = new CosmosClientBuilder[3];
    clientBuilders[0] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .gatewayMode()
        .addOperationPolicy(policy);
    clientBuilders[1] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .addOperationPolicy(policy);
    // Third builder verifies that multiple registered policies compose: the first
    // handles point/query/readMany operations, the second bulk and change feed.
    clientBuilders[2] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .addOperationPolicy((cosmosOperationDetails) -> {
            CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
            String operationType = cosmosDiagnosticsContext.getOperationType();
            String spanName = cosmosDiagnosticsContext.getSpanName();
            CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
            createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
            createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
            createReadManyOptions(spanName, cosmosRequestOptions);
            cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
        }).addOperationPolicy((cosmosOperationDetails) -> {
            CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
            String operationType = cosmosDiagnosticsContext.getOperationType();
            String spanName = cosmosDiagnosticsContext.getSpanName();
            CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
            createBulkOptions(operationType, spanName, cosmosRequestOptions);
            createChangeFeedOptions(spanName, cosmosRequestOptions);
            cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
        });
    return clientBuilders;
}
// BUG FIX: the original code stacked @BeforeClass directly on top of afterClass(),
// so the suite had no setup at all (client/container stayed null) and the teardown
// also ran before the class. Split the annotations: @BeforeClass now owns a proper
// setup method and @AfterClass keeps the teardown.
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT)
public void beforeClass() {
    // NOTE(review): setup reconstructed from the standard TestSuiteBase pattern —
    // confirm the shared-container helper name matches the rest of this test module.
    client = getClientBuilder().buildAsyncClient();
    container = getSharedMultiPartitionCosmosContainer(client);
}

@AfterClass(groups = {"fast"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    // Release the async client created in beforeClass(); safeClose tolerates null.
    safeClose(client);
}
@AfterMethod(alwaysRun = true)
public void afterMethod() {
    // Restore the baseline option values so property mutations made by one test
    // cannot leak into the next.
    changeProperties(initialOptions);
}
@DataProvider(name = "changedOptions")
// Supplies "changed" option vectors (same index order as optionLabels). The first
// two rows flip most values relative to initialOptions; the third re-supplies the
// baseline to verify the no-change path.
private String[][] createChangedOptions() {
    return new String[][] {
        { "8", "ConsistentPrefix", "true", "false", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "true", "20", "QueryNameChanged" },
        { "4", "Eventual", "false", "true", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "true", "false", "20", "QueryNameChanged" },
        initialOptions
    };
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Creates an item under the baseline options, validates the recorded request
// options, then mutates the properties and creates again to confirm the policy
// picks up the changed values on the next request.
public void createItem(String[] changedOptions) throws Exception {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    validateItemResponse(item, itemResponse);
    validateOptions(initialOptions, itemResponse, false);
    changeProperties(changedOptions);
    item = getDocumentDefinition(UUID.randomUUID().toString());
    itemResponse = container.createItem(item).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    // changedOptions[2] toggles contentResponseOnWriteEnabled: when disabled the
    // service returns no payload for writes.
    if (changedOptions[2].equals("true")) {
        validateItemResponse(item, itemResponse);
    } else {
        assertThat(BridgeInternal.getProperties(itemResponse)).isNull();
    }
    validateOptions(changedOptions, itemResponse, false);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Deletes an item under the baseline options, then again after changing the
// properties, validating the recorded request options both times.
public void deleteItem(String[] changedOptions) throws Exception {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(item).block();
    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    CosmosItemResponse<?> deleteResponse = container.deleteItem(item.getId(),
        new PartitionKey(item.get("mypk")),
        options).block();
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
    validateOptions(initialOptions, deleteResponse, false);
    changeProperties(changedOptions);
    // Re-create the same document so the second delete has something to remove.
    container.createItem(item).block();
    deleteResponse = container.deleteItem(item.getId(),
        new PartitionKey(item.get("mypk")),
        options).block();
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
    validateOptions(changedOptions, deleteResponse, false);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Reads an item under the baseline options and again after changing properties.
// Reads pass doesRequestLevelConsistencyOverrideMatter=true, so the consistency
// override is also asserted.
public void readItem(String[] changedOptions) throws Exception {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(item).block();
    CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(item.getId(),
        new PartitionKey(item.get("mypk")),
        new CosmosItemRequestOptions(),
        InternalObjectNode.class).block();
    validateItemResponse(item, readResponse);
    validateOptions(initialOptions, readResponse, true);
    changeProperties(changedOptions);
    readResponse = container.readItem(item.getId(),
        new PartitionKey(item.get("mypk")),
        new CosmosItemRequestOptions(),
        InternalObjectNode.class).block();
    validateItemResponse(item, readResponse);
    validateOptions(changedOptions, readResponse, true);
}
// Upserts an item under the baseline options, then mutates the option properties
// and upserts again, verifying the recorded request options track the changes.
// Consistency fix: declare 'throws Exception' like every sibling test instead of
// 'throws Throwable' — the body throws no checked exceptions, so narrowing is safe.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void upsertItem(String[] changedOptions) throws Exception {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    validateItemResponse(item, upsertResponse);
    validateOptions(initialOptions, upsertResponse, false);
    changeProperties(changedOptions);
    String newPropLabel = "newProp";
    String newPropValue = UUID.randomUUID().toString();
    item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
    upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    // changedOptions[2] toggles contentResponseOnWriteEnabled: when disabled the
    // service returns no payload for writes.
    if (changedOptions[2].equals("true")) {
        assertThat(BridgeInternal.getProperties(upsertResponse).get(newPropLabel)).isEqualTo(newPropValue);
    } else {
        assertThat(BridgeInternal.getProperties(upsertResponse)).isNull();
    }
    validateOptions(changedOptions, upsertResponse, false);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Patches an item under the baseline options, then again after changing the
// properties, verifying the recorded request options and the patched payload.
public void patchItem(String[] changedOptions) throws Exception {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(item, new CosmosItemRequestOptions()).block();
    validateItemResponse(item, createResponse);
    String newPropLabel = "newProp";
    String newPropValue = UUID.randomUUID().toString();
    PartitionKey partitionKey = new PartitionKey(item.get("mypk"));
    CosmosPatchOperations patchOperations = CosmosPatchOperations.create();
    patchOperations.add("/" + newPropLabel, newPropValue);
    CosmosItemResponse<InternalObjectNode> patchResponse = container.patchItem(
        item.getId(), partitionKey, patchOperations, InternalObjectNode.class).block();
    assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
    assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
    validateOptions(initialOptions, patchResponse, false);
    changeProperties(changedOptions);
    // Second patch with a fresh value so the response payload check is meaningful.
    newPropValue = UUID.randomUUID().toString();
    patchOperations = CosmosPatchOperations.create();
    patchOperations.add("/" + newPropLabel, newPropValue);
    patchResponse = container.patchItem(item.getId(), partitionKey,
        patchOperations, InternalObjectNode.class).block();
    assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
    // changedOptions[2] toggles contentResponseOnWriteEnabled: when disabled the
    // service returns no payload for writes.
    if (changedOptions[2].equals("true")) {
        assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
    } else {
        assertThat(BridgeInternal.getProperties(patchResponse)).isNull();
    }
    validateOptions(changedOptions, patchResponse, false);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Replaces an item under the baseline options, then again after changing the
// properties, verifying the recorded request options and the replaced payload.
public void replaceItem(String[] changedOptions) throws Exception {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
    validateItemResponse(item, itemResponse);
    String newPropLabel = "newProp";
    String newPropValue = UUID.randomUUID().toString();
    item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    PartitionKey pk = new PartitionKey(item.get("mypk"));
    ModelBridgeInternal.setPartitionKey(options, pk);
    CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(item,
        item.getId(),
        pk,
        options).block();
    assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
    validateOptions(initialOptions, replace, false);
    changeProperties(changedOptions);
    // Replace again with a fresh value so the response payload check is meaningful.
    newPropValue = UUID.randomUUID().toString();
    item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
    replace = container.replaceItem(item,
        item.getId(),
        pk,
        options).block();
    // changedOptions[2] toggles contentResponseOnWriteEnabled: when disabled the
    // service returns no payload for writes.
    if (changedOptions[2].equals("true")) {
        assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
    } else {
        assertThat(BridgeInternal.getProperties(replace)).isNull();
    }
    validateOptions(changedOptions, replace, false);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Executes a 10-item bulk create under the baseline options, then re-executes
// (the Flux re-generates fresh random items on re-subscription) after changing
// the properties, validating the per-item recorded request options both times.
public void bulk(String[] changedOptions) {
    // NOTE(review): the response element type parameter CosmosBulkAsyncTest looks
    // copied from another test; it is unused here — confirm intended.
    Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.range(0, 10).map(i -> {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        return CosmosBulkOperations.getCreateItemOperation(item, new PartitionKey(item.get("mypk")));
    });
    CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
    Flux<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> responseFlux = container
        .executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
    AtomicInteger processedDoc = new AtomicInteger(0);
    responseFlux
        .flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
            processedDoc.incrementAndGet();
            CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
            if (cosmosBulkOperationResponse.getException() != null) {
                logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
                fail(cosmosBulkOperationResponse.getException().toString());
            }
            assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
            assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
            assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
            assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
            assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
            validateOptions(initialOptions, cosmosBulkItemResponse);
            return Mono.just(cosmosBulkItemResponse);
        }).blockLast();
    assertThat(processedDoc.get()).isEqualTo(10);
    changeProperties(changedOptions);
    // Second run: resubscribing the source Flux generates 10 new random items.
    responseFlux = container
        .executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
    AtomicInteger processedDoc2 = new AtomicInteger(0);
    responseFlux
        .flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
            processedDoc2.incrementAndGet();
            CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
            if (cosmosBulkOperationResponse.getException() != null) {
                logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
                fail(cosmosBulkOperationResponse.getException().toString());
            }
            assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
            assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
            assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
            assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
            assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
            validateOptions(changedOptions, cosmosBulkItemResponse);
            return Mono.just(cosmosBulkItemResponse);
        }).blockLast();
    assertThat(processedDoc2.get()).isEqualTo(10);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Executes a two-item transactional batch (both items share a partition key)
// under the baseline options, then again after changing the properties,
// validating the recorded request options both times.
public void batch(String[] changedOptions) {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    InternalObjectNode item2 = getDocumentDefinition(UUID.randomUUID().toString());
    // Align partition keys: a transactional batch must target a single logical partition.
    item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
    batch.createItemOperation(item);
    batch.createItemOperation(item2);
    CosmosBatchResponse batchResponse = container.executeCosmosBatch(batch).block();
    assertThat(batchResponse).isNotNull();
    assertThat(batchResponse.getStatusCode())
        .as("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}",
            batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
        .isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.size()).isEqualTo(2);
    assertThat(batchResponse.getRequestCharge()).isPositive();
    assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
    validateOptions(initialOptions, batchResponse);
    changeProperties(changedOptions);
    item = getDocumentDefinition(UUID.randomUUID().toString());
    item2 = getDocumentDefinition(UUID.randomUUID().toString());
    item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
    batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
    batch.createItemOperation(item);
    batch.createItemOperation(item2);
    batchResponse = container.executeCosmosBatch(batch).block();
    assertThat(batchResponse).isNotNull();
    assertThat(batchResponse.getStatusCode())
        .as("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}",
            batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
        .isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.size()).isEqualTo(2);
    assertThat(batchResponse.getRequestCharge()).isPositive();
    assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
    validateOptions(changedOptions, batchResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Runs an id-filtered query under the baseline options and again after changing
// the properties, validating the per-page recorded request options.
public void query(String[] changedOptions) {
    String id = UUID.randomUUID().toString();
    container.createItem(getDocumentDefinition(id)).block();
    String query = String.format("SELECT * from c where c.id = '%s'", id);
    container.queryItems(query, InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isEqualTo(1);
            assertThat(results.get(0).getId()).isEqualTo(id);
            validateOptions(initialOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
    changeProperties(changedOptions);
    container.queryItems(query, InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isEqualTo(1);
            assertThat(results.get(0).getId()).isEqualTo(id);
            validateOptions(changedOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Reads all items under the baseline options and again after changing the
// properties, validating the per-page recorded request options. Only asserts a
// lower bound on result size because the shared container accumulates documents.
public void readAllItems(String[] changedOptions) throws Exception {
    String id = UUID.randomUUID().toString();
    container.createItem(getDocumentDefinition(id)).block();
    container.readAllItems(InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isGreaterThanOrEqualTo(1);
            validateOptions(initialOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
    changeProperties(changedOptions);
    container.readAllItems(InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isGreaterThanOrEqualTo(1);
            validateOptions(changedOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Creates five documents, reads them back via readMany under the baseline
// options and again after changing the properties, validating the recorded
// request options (readMany flavor) both times.
public void readMany(String[] changedOptions) throws Exception {
    List<CosmosItemIdentity> cosmosItemIdentities = new ArrayList<>();
    Set<String> idSet = new HashSet<>();
    int numDocuments = 5;
    for (int i = 0; i < numDocuments; i++) {
        InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(document).block();
        PartitionKey partitionKey = new PartitionKey(document.get("mypk"));
        CosmosItemIdentity cosmosItemIdentity = new CosmosItemIdentity(partitionKey, document.getId());
        cosmosItemIdentities.add(cosmosItemIdentity);
        idSet.add(document.getId());
    }
    FeedResponse<InternalObjectNode> feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
    assertThat(feedResponse).isNotNull();
    assertThat(feedResponse.getResults()).isNotNull();
    assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
    for (int i = 0; i < feedResponse.getResults().size(); i++) {
        InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
        assertThat(idSet.contains(fetchedResult.getId())).isTrue();
    }
    // isReadMany=true selects the readMany branch of validateOptions.
    validateOptions(initialOptions, feedResponse, false, true);
    changeProperties(changedOptions);
    feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
    assertThat(feedResponse).isNotNull();
    assertThat(feedResponse.getResults()).isNotNull();
    assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
    for (int i = 0; i < feedResponse.getResults().size(); i++) {
        InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
        assertThat(idSet.contains(fetchedResult.getId())).isTrue();
    }
    validateOptions(changedOptions, feedResponse, false, true);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Drains the change feed from the beginning under the baseline options, then
// inserts more documents, changes the properties, and resumes from the saved
// continuation token — validating the recorded request options (change-feed
// flavor) on every page.
public void queryChangeFeed(String[] changedOptions) {
    int numInserted = 20;
    for (int i = 0; i < numInserted; i++) {
        String id = UUID.randomUUID().toString();
        container.createItem(getDocumentDefinition(id)).block();
    }
    CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions
        .createForProcessingFromBeginning(FeedRange.forFullRange());
    Iterator<FeedResponse<InternalObjectNode>> responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
        .toIterable().iterator();
    // Keep the last continuation token so the second pass resumes where this left off.
    String continuationToken = "";
    while (responseIterator.hasNext()) {
        FeedResponse<InternalObjectNode> response = responseIterator.next();
        assertThat(response.getRequestCharge()).isGreaterThan(0);
        continuationToken = response.getContinuationToken();
        validateOptions(initialOptions, response, true, false);
    }
    changeProperties(changedOptions);
    for (int i = 0; i < numInserted; i++) {
        String id = UUID.randomUUID().toString();
        container.createItem(getDocumentDefinition(id)).block();
    }
    options = CosmosChangeFeedRequestOptions
        .createForProcessingFromContinuation(continuationToken);
    responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
        .toIterable().iterator();
    int totalResults = 0;
    while (responseIterator.hasNext()) {
        FeedResponse<InternalObjectNode> response = responseIterator.next();
        assertThat(response.getRequestCharge()).isGreaterThan(0);
        totalResults += response.getResults().size();
        validateOptions(changedOptions, response, true, false);
    }
    // Only the documents inserted after the first drain should appear on resume.
    assertThat(totalResults).isEqualTo(numInserted);
}
// Builds a minimal test document with the supplied id and a random partition key
// value ("mypk"); the "sgmts" payload is fixed filler data.
private InternalObjectNode getDocumentDefinition(String documentId) {
    final String partitionKeyValue = UUID.randomUUID().toString();
    final String json = String.format(
        "{ \"id\": \"%s\", \"mypk\": \"%s\", \"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}",
        documentId,
        partitionKeyValue);
    return new InternalObjectNode(json);
}
// Asserts the response payload carries the same resource id as the request document.
private void validateItemResponse(InternalObjectNode containerProperties,
                                  CosmosItemResponse<InternalObjectNode> response) {
    InternalObjectNode returned = BridgeInternal.getProperties(response);
    assertThat(returned.getId()).isNotNull();
    assertThat(returned.getId())
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}
// Asserts the request options recorded in the diagnostics context match the expected
// values; indices follow the optionLabels ordering declared on the class.
private void validateOptions(String[] options, CosmosItemResponse<?> response, boolean doesRequestLevelConsistencyOverrideMatter) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
// options[0] = end-to-end timeout (seconds)
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(options[0]));
if (doesRequestLevelConsistencyOverrideMatter) {
// options[1] = consistency level (compared case-insensitively)
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(options[1].toUpperCase(Locale.ROOT));
}
// options[2] = contentResponseOnWriteEnabled, options[3] = nonIdempotentWriteRetriesEnabled
assertThat(requestOptions.isContentResponseOnWriteEnabled()).isEqualTo(Boolean.parseBoolean(options[2]));
assertThat(requestOptions.getNonIdempotentWriteRetriesEnabled()).isEqualTo(Boolean.parseBoolean(options[3]));
// options[4] = dedicated gateway cache bypass, options[5] = throughput control group
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(options[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
// options[6] = request charge threshold, options[8] = comma-separated excluded regions
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
// Batch responses only expose a subset of options: consistency level (options[1]),
// request charge threshold (options[6]) and excluded regions (options[8]).
private void validateOptions(String[] options, CosmosBatchResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(options[1].toUpperCase(Locale.ROOT));
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
// Bulk item responses are validated only against excluded regions (options[8]) and
// the throughput control group name (options[5]) — matching what createBulkOptions sets.
private void validateOptions(String[] options, CosmosBulkItemResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
}
// Feed-response variant: which options are asserted depends on the operation shape —
// change feed, readMany, or query/readAllItems. Indices follow optionLabels.
private void validateOptions(String[] changedOptions, FeedResponse<InternalObjectNode> response, boolean isChangeFeed, boolean isReadMany) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
if (isChangeFeed) {
// Change feed: throughput group [5], charge threshold [6], excluded regions [8],
// max item count [12], max prefetch page count [15].
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.getMaxPrefetchPageCount()).isEqualTo(Integer.parseInt(changedOptions[15]));
} else if (isReadMany) {
// readMany: same point-read style options plus continuation limit and metrics flags.
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(changedOptions[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
} else {
// Query / readAllItems: the full query option surface, including parallelism,
// buffering, scan-in-query, and the query name.
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(changedOptions[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.isScanInQueryEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[7]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getMaxDegreeOfParallelism()).isEqualTo(Integer.parseInt(changedOptions[9]));
assertThat(requestOptions.getMaxBufferedItemCount()).isEqualTo(Integer.parseInt(changedOptions[10]));
assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
assertThat(requestOptions.getQueryNameOrDefault("")).isEqualTo(changedOptions[16]);
}
}
// Overwrites every policy-backing property with the supplied values, positionally
// matched against optionLabels. The policies re-read these on each operation.
private void changeProperties(String[] values) {
    int index = 0;
    for (String value : values) {
        prop.setProperty(optionLabels[index++], value);
    }
}
} |
You're inheriting `TestSuiteBase` which provides `safeClose` which does the same (does null check prior as well). | public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
} | this.client.close(); | public void afterClass() {
safeClose(client);
} | class OperationPoliciesTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosAsyncContainer container;
private static final ImplementationBridgeHelpers.CosmosAsyncContainerHelper.CosmosAsyncContainerAccessor containerAccessor
= ImplementationBridgeHelpers.CosmosAsyncContainerHelper.getCosmosAsyncContainerAccessor();
// Mutable property bag the operation policies read from on every operation; tests
// mutate it via changeProperties(...) to simulate dynamic option changes.
private static final Properties prop = new Properties();
// Property keys backing each overridable request option.
private static final String E2E_TIMEOUT = "timeout.seconds";
private static final String CONSISTENCY_LEVEL = "consistency.level";
private static final String CONTENT_RESPONSE_ON_WRITE = "contentResponseOnWriteEnabled";
private static final String NON_IDEMPOTENT_WRITE_RETRIES = "nonIdempotentWriteRetriesEnabled";
private static final String BYPASS_CACHE = "dedicatedGatewayOptions.bypassCache";
private static final String THROUGHPUT_CONTROL_GROUP_NAME = "throughputControlGroupName";
private static final String REQUEST_CHARGE_THRESHOLD = "diagnosticThresholds.requestChargeThreshold";
private static final String SCAN_IN_QUERY = "scanInQueryEnabled";
private static final String EXCLUDE_REGIONS = "excludeRegions";
private static final String MAX_DEGREE_OF_PARALLELISM = "maxDegreeOfParallelism";
private static final String MAX_BUFFERED_ITEM_COUNT = "maxBufferedItemCount";
private static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_KB = "responseContinuationTokenLimitKb";
private static final String MAX_ITEM_COUNT = "maxItemCount";
private static final String QUERY_METRICS = "queryMetricsEnabled";
private static final String INDEX_METRICS = "indexMetricsEnabled";
private static final String MAX_PREFETCH_PAGE_COUNT = "maxPrefetchPageCount";
private static final String QUERY_NAME = "queryName";
// optionLabels[i] and initialOptions[i] are positionally paired; the same ordering is
// assumed by every String[] passed to validateOptions(...) in this class.
private static final String[] optionLabels = {E2E_TIMEOUT, CONSISTENCY_LEVEL, CONTENT_RESPONSE_ON_WRITE, NON_IDEMPOTENT_WRITE_RETRIES, BYPASS_CACHE, THROUGHPUT_CONTROL_GROUP_NAME, REQUEST_CHARGE_THRESHOLD, SCAN_IN_QUERY, EXCLUDE_REGIONS, MAX_DEGREE_OF_PARALLELISM, MAX_BUFFERED_ITEM_COUNT, RESPONSE_CONTINUATION_TOKEN_LIMIT_KB, MAX_ITEM_COUNT, QUERY_METRICS, INDEX_METRICS, MAX_PREFETCH_PAGE_COUNT, QUERY_NAME};
private static final String[] initialOptions = {"20", ConsistencyLevel.STRONG.toString().toUpperCase(), "true", "false", "false", "default", "2000", "false", "East US 2", "2", "100", "200", "30", "true", "true", "10", "QueryName"};
@Factory(dataProvider = "clientBuildersWithApplyPolicies")
public OperationPoliciesTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
for (int i = 0; i < optionLabels.length; i++) {
prop.setProperty(optionLabels[i], initialOptions[i]);
}
}
// Applies the point-operation option overrides (create/read/replace/delete/patch/
// upsert and transactional batch), sourcing every value from the property bag.
// NOTE(review): "nonTransactionalBatch" span names also contain "transactionalBatch",
// so bulk spans match this branch as well — confirm this overlap is intended.
private static void createReadDeleteBatchEtcOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Create") || operationType.equals("Read") || operationType.equals("Replace")
|| operationType.equals("Delete") || operationType.equals("Patch") || operationType.equals("Upsert")
|| (operationType.equals("Batch") && spanName.contains("transactionalBatch"))) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)))
.setContentResponseOnWriteEnabled(Boolean.parseBoolean(prop.getProperty(CONTENT_RESPONSE_ON_WRITE)))
.setNonIdempotentWriteRetriesEnabled(Boolean.parseBoolean(prop.getProperty(NON_IDEMPOTENT_WRITE_RETRIES)))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
}
}
// Applies the query/readAllItems option overrides — the widest option surface,
// including parallelism, buffering, metrics, prefetch and the query name.
private static void createQueryReadAllItemsOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Query") || spanName.contains("readAllItems")) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setScanInQueryEnabled(Boolean.parseBoolean(prop.getProperty(SCAN_IN_QUERY)))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
.setMaxDegreeOfParallelism(Integer.parseInt(prop.getProperty(MAX_DEGREE_OF_PARALLELISM)))
.setMaxBufferedItemCount(Integer.parseInt(prop.getProperty(MAX_BUFFERED_ITEM_COUNT)))
.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)))
.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
.setQueryName(prop.getProperty(QUERY_NAME))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)));
}
}
// Applies the readMany-specific option overrides (matched purely by span name).
private static void createReadManyOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (spanName.contains("readMany")) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)));
}
}
// Applies the bulk (non-transactional batch) option overrides from the property bag.
private static void createBulkOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!operationType.equals("Batch") || !spanName.contains("nonTransactionalBatch")) {
        return;
    }
    List<String> excludedRegions =
        new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")));
    cosmosRequestOptions
        .setExcludeRegions(excludedRegions)
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
}
// Applies the change-feed option overrides (matched purely by span name).
private static void createChangeFeedOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (spanName.contains("queryChangeFeed")) {
cosmosRequestOptions.setExcludeRegions((new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
}
}
// Supplies three client configurations: gateway mode with one combined policy,
// direct mode with the same policy, and direct mode with the logic split across
// two chained policies — proving policies compose.
@DataProvider
public static CosmosClientBuilder[] clientBuildersWithApplyPolicies() {
// Single policy that dispatches on operation type / span name and applies the
// matching option subset from the property bag.
CosmosOperationPolicy policy = (cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
createReadManyOptions(spanName, cosmosRequestOptions);
createBulkOptions(operationType, spanName, cosmosRequestOptions);
createChangeFeedOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
};
CosmosClientBuilder[] clientBuilders = new CosmosClientBuilder[3];
// Gateway mode, single policy.
clientBuilders[0] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode()
.addOperationPolicy(policy);
// Default (direct) mode, single policy.
clientBuilders[1] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.addOperationPolicy(policy);
// Direct mode, same behavior split across two policies to verify chaining.
clientBuilders[2] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.addOperationPolicy((cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
createReadManyOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
}).addOperationPolicy((cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createBulkOptions(operationType, spanName, cosmosRequestOptions);
createChangeFeedOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
});
return clientBuilders;
}
// One-time setup: build the client for this configuration and resolve the shared
// multi-partition container through it.
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT)
public void before_OperationPoliciesTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client);
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
}
/**
 * Class-level teardown: releases the async client. {@code safeClose} (inherited from
 * {@code TestSuiteBase}) performs the null check before closing, so no explicit
 * assertion is needed here.
 *
 * Fix: previously the {@code @AfterClass} annotation sat directly on
 * {@code afterMethod}, so the class teardown was missing entirely (client leak) and
 * the per-test property reset doubled as class teardown.
 */
@AfterClass(groups = {"fast"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    safeClose(client);
}

// Per-test teardown: restore the policy-backing properties to their initial values
// so one test's overrides cannot leak into the next.
@AfterMethod()
public void afterMethod() {
    changeProperties(initialOptions);
}
// Option permutations exercised by every test: two changed sets plus the initial
// set (to confirm re-applying the defaults also round-trips correctly).
@DataProvider(name = "changedOptions")
private String[][] createChangedOptions() {
return new String[][] {
{ "8", ConsistencyLevel.SESSION.toString().toUpperCase(), "true", "false", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "false", "20", "QueryNameChanged" },
{ "4", ConsistencyLevel.EVENTUAL.toString().toUpperCase(), "false", "true", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "false", "20", "QueryNameChanged" },
initialOptions
};
}
// Verifies policies are applied to item creation, including the
// contentResponseOnWriteEnabled flag (changedOptions[2]) controlling whether the
// created document body is echoed back.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void createItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, itemResponse);
validateOptions(initialOptions, itemResponse);
changeProperties(changedOptions);
item = getDocumentDefinition(UUID.randomUUID().toString());
itemResponse = container.createItem(item).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
// With contentResponseOnWrite disabled the service returns no payload.
if (changedOptions[2].equals("true")) {
validateItemResponse(item, itemResponse);
} else {
assertThat(BridgeInternal.getProperties(itemResponse)).isNull();
}
validateOptions(changedOptions, itemResponse);
}
// Verifies policies are applied to item deletion, before and after the
// policy-backing properties change.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void deleteItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(item).block();
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(item.getId(),
new PartitionKey(item.get("mypk")),
options).block();
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
validateOptions(initialOptions, deleteResponse);
changeProperties(changedOptions);
// Re-create the same item so it can be deleted again under the changed options.
container.createItem(item).block();
deleteResponse = container.deleteItem(item.getId(),
new PartitionKey(item.get("mypk")),
options).block();
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
validateOptions(changedOptions, deleteResponse);
}
// Verifies policies are applied to point reads, before and after the
// policy-backing properties change.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(item).block();
CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(item.getId(),
new PartitionKey(item.get("mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class).block();
validateItemResponse(item, readResponse);
validateOptions(initialOptions, readResponse);
changeProperties(changedOptions);
readResponse = container.readItem(item.getId(),
new PartitionKey(item.get("mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class).block();
validateItemResponse(item, readResponse);
validateOptions(changedOptions, readResponse);
}
// Verifies policies are applied to upserts; like createItem, the echoed payload
// depends on the contentResponseOnWriteEnabled flag (changedOptions[2]).
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void upsertItem(String[] changedOptions) throws Throwable {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, upsertResponse);
validateOptions(initialOptions, upsertResponse);
changeProperties(changedOptions);
// Mutate the document so the second upsert is an update rather than a no-op.
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(upsertResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(upsertResponse)).isNull();
}
validateOptions(changedOptions, upsertResponse);
}
// Verifies policies are applied to patch operations; the echoed payload depends on
// the contentResponseOnWriteEnabled flag (changedOptions[2]).
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void patchItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(item, new CosmosItemRequestOptions()).block();
validateItemResponse(item, createResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
PartitionKey partitionKey = new PartitionKey(item.get("mypk"));
CosmosPatchOperations patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
CosmosItemResponse<InternalObjectNode> patchResponse = container.patchItem(
item.getId(), partitionKey, patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, patchResponse);
changeProperties(changedOptions);
// Patch again with a fresh value under the changed options.
newPropValue = UUID.randomUUID().toString();
patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
patchResponse = container.patchItem(item.getId(), partitionKey,
patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(patchResponse)).isNull();
}
validateOptions(changedOptions, patchResponse);
}
// Verifies policies are applied to replace operations; the echoed payload depends on
// the contentResponseOnWriteEnabled flag (changedOptions[2]).
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void replaceItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
validateItemResponse(item, itemResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
PartitionKey pk = new PartitionKey(item.get("mypk"));
ModelBridgeInternal.setPartitionKey(options, pk);
CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, replace);
changeProperties(changedOptions);
// Replace again with a fresh value under the changed options.
newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(replace)).isNull();
}
validateOptions(changedOptions, replace);
}
// Verifies policies are applied to bulk (non-transactional batch) execution; runs the
// same 10-item create flux before and after the property change. The flux is cold, so
// re-subscribing in the second pass generates fresh operations with new ids.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void bulk(String[] changedOptions) {
Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.range(0, 10).map(i -> {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
return CosmosBulkOperations.getCreateItemOperation(item, new PartitionKey(item.get("mypk")));
});
CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
Flux<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> responseFlux = container
.executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
AtomicInteger processedDoc = new AtomicInteger(0);
responseFlux
.flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
processedDoc.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
if (cosmosBulkOperationResponse.getException() != null) {
logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
fail(cosmosBulkOperationResponse.getException().toString());
}
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
validateOptions(initialOptions, cosmosBulkItemResponse);
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(10);
// Flip properties and re-subscribe; each per-item response must show them.
changeProperties(changedOptions);
responseFlux = container
.executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
AtomicInteger processedDoc2 = new AtomicInteger(0);
responseFlux
.flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
processedDoc2.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
if (cosmosBulkOperationResponse.getException() != null) {
logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
fail(cosmosBulkOperationResponse.getException().toString());
}
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
validateOptions(changedOptions, cosmosBulkItemResponse);
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc2.get()).isEqualTo(10);
}
/**
 * Verifies policies are applied to transactional batch execution, before and after
 * the policy-backing properties change.
 *
 * Fix: the AssertJ {@code as(...)} descriptions used MessageFormat-style placeholders
 * ({@code {0}}/{@code {1}}/{@code {2}}) with only two arguments; AssertJ formats
 * descriptions with {@code String.format}, so nothing was ever substituted. Replaced
 * with {@code %s} placeholders matching the two supplied arguments.
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void batch(String[] changedOptions) {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    InternalObjectNode item2 = getDocumentDefinition(UUID.randomUUID().toString());
    // Both operations must share a partition key to be valid in one transactional batch.
    item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
    batch.createItemOperation(item);
    batch.createItemOperation(item2);
    CosmosBatchResponse batchResponse = container.executeCosmosBatch(batch).block();
    assertThat(batchResponse).isNotNull();
    assertThat(batchResponse.getStatusCode())
        .as("Batch server response had StatusCode %s instead of %s expected",
            batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
        .isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.size()).isEqualTo(2);
    assertThat(batchResponse.getRequestCharge()).isPositive();
    assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
    validateOptions(initialOptions, batchResponse);
    // Flip properties and repeat with fresh items; the new values must be observed.
    changeProperties(changedOptions);
    item = getDocumentDefinition(UUID.randomUUID().toString());
    item2 = getDocumentDefinition(UUID.randomUUID().toString());
    item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
    batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
    batch.createItemOperation(item);
    batch.createItemOperation(item2);
    batchResponse = container.executeCosmosBatch(batch).block();
    assertThat(batchResponse).isNotNull();
    assertThat(batchResponse.getStatusCode())
        .as("Batch server response had StatusCode %s instead of %s expected",
            batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
        .isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.size()).isEqualTo(2);
    assertThat(batchResponse.getRequestCharge()).isPositive();
    assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
    validateOptions(changedOptions, batchResponse);
}
/**
 * Verifies policies are applied to item queries, before and after the policy-backing
 * properties change.
 *
 * Fix: {@code timeOut = TIMEOUT} was commented out in the annotation (apparent
 * debugging leftover) while every sibling test in this class runs with the shared
 * TIMEOUT guard; restored for consistency so a hung query cannot stall the suite.
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void query(String[] changedOptions) {
    String id = UUID.randomUUID().toString();
    container.createItem(getDocumentDefinition(id)).block();
    String query = String.format("SELECT * from c where c.id = '%s'", id);
    container.queryItems(query, InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isEqualTo(1);
            assertThat(results.get(0).getId()).isEqualTo(id);
            validateOptions(initialOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
    // Re-run the same query after flipping the policy-backing properties.
    changeProperties(changedOptions);
    container.queryItems(query, InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isEqualTo(1);
            assertThat(results.get(0).getId()).isEqualTo(id);
            validateOptions(changedOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
}
// Verifies policies are applied to readAllItems, before and after the
// policy-backing properties change.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readAllItems(String[] changedOptions) throws Exception {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
container.readAllItems(InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
// The shared container may hold documents from other tests — only a lower bound.
assertThat(results.size()).isGreaterThanOrEqualTo(1);
validateOptions(initialOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
changeProperties(changedOptions);
container.readAllItems(InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isGreaterThanOrEqualTo(1);
validateOptions(changedOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
}
// Verifies that readMany honors the readMany-specific policy options, before and
// after the property values are changed.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readMany(String[] changedOptions) throws Exception {
List<CosmosItemIdentity> cosmosItemIdentities = new ArrayList<>();
Set<String> idSet = new HashSet<>();
int numDocuments = 5;
// Seed documents and remember their (partition key, id) identities for readMany.
for (int i = 0; i < numDocuments; i++) {
InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(document).block();
PartitionKey partitionKey = new PartitionKey(document.get("mypk"));
CosmosItemIdentity cosmosItemIdentity = new CosmosItemIdentity(partitionKey, document.getId());
cosmosItemIdentities.add(cosmosItemIdentity);
idSet.add(document.getId());
}
// First pass with initial options.
FeedResponse<InternalObjectNode> feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
assertThat(feedResponse).isNotNull();
assertThat(feedResponse.getResults()).isNotNull();
assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
for (int i = 0; i < feedResponse.getResults().size(); i++) {
InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
assertThat(idSet.contains(fetchedResult.getId())).isTrue();
}
validateOptions(initialOptions, feedResponse, false, true);
// Second pass after the policy inputs change.
changeProperties(changedOptions);
feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
assertThat(feedResponse).isNotNull();
assertThat(feedResponse.getResults()).isNotNull();
assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
for (int i = 0; i < feedResponse.getResults().size(); i++) {
InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
assertThat(idSet.contains(fetchedResult.getId())).isTrue();
}
validateOptions(changedOptions, feedResponse, false, true);
}
// Verifies change-feed-specific policy options: drains the feed from the beginning,
// changes the policy inputs, inserts more documents, and resumes from the captured
// continuation token expecting exactly the newly inserted documents.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void queryChangeFeed(String[] changedOptions) {
int numInserted = 20;
for (int i = 0; i < numInserted; i++) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
}
CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions
.createForProcessingFromBeginning(FeedRange.forFullRange());
Iterator<FeedResponse<InternalObjectNode>> responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
.toIterable().iterator();
// Capture the last page's continuation token so the second read resumes after
// everything inserted so far. (Documents were just inserted, so at least one
// page is produced and the token is never left as "".)
String continuationToken = "";
while (responseIterator.hasNext()) {
FeedResponse<InternalObjectNode> response = responseIterator.next();
assertThat(response.getRequestCharge()).isGreaterThan(0);
continuationToken = response.getContinuationToken();
validateOptions(initialOptions, response, true, false);
}
changeProperties(changedOptions);
for (int i = 0; i < numInserted; i++) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
}
options = CosmosChangeFeedRequestOptions
.createForProcessingFromContinuation(continuationToken);
responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
.toIterable().iterator();
int totalResults = 0;
while (responseIterator.hasNext()) {
FeedResponse<InternalObjectNode> response = responseIterator.next();
assertThat(response.getRequestCharge()).isGreaterThan(0);
totalResults += response.getResults().size();
validateOptions(changedOptions, response, true, false);
}
// Only the second batch of inserts should appear after the continuation token.
assertThat(totalResults).isEqualTo(numInserted);
}
/**
 * Builds a test document with the supplied id, a freshly generated random
 * partition-key value under "mypk", and a fixed "sgmts" payload.
 */
private InternalObjectNode getDocumentDefinition(String documentId) {
    String partitionKeyValue = UUID.randomUUID().toString();
    String json = String.format(
        "{ \"id\": \"%s\", \"mypk\": \"%s\", \"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}",
        documentId,
        partitionKeyValue);
    return new InternalObjectNode(json);
}
/**
 * Asserts that the item returned in {@code response} carries the same id as the
 * document that was sent.
 */
private void validateItemResponse(InternalObjectNode containerProperties,
                                  CosmosItemResponse<InternalObjectNode> response) {
    InternalObjectNode returnedItem = BridgeInternal.getProperties(response);
    assertThat(returnedItem.getId()).isNotNull();
    assertThat(returnedItem.getId())
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}
// Asserts that the request options recorded in the operation's diagnostics context
// match the expected values. Indices follow optionLabels:
// [0] e2e timeout (seconds), [1] consistency level, [2] contentResponseOnWriteEnabled,
// [3] nonIdempotentWriteRetriesEnabled, [4] bypassCache, [5] throughput control group,
// [6] request charge threshold, [8] excluded regions (comma-separated).
// Index 7 (scanInQueryEnabled) is not checked here — presumably not applicable to
// point operations; confirm against the policy helpers.
private void validateOptions(String[] options, CosmosItemResponse<?> response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(options[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(options[1]);
assertThat(requestOptions.isContentResponseOnWriteEnabled()).isEqualTo(Boolean.parseBoolean(options[2]));
assertThat(requestOptions.getNonIdempotentWriteRetriesEnabled()).isEqualTo(Boolean.parseBoolean(options[3]));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(options[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
// Transactional-batch variant: only consistency level [1], request charge
// threshold [6] and excluded regions [8] are validated.
private void validateOptions(String[] options, CosmosBatchResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(options[1]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
// Bulk (non-transactional batch) variant: only excluded regions [8] and the
// throughput control group name [5] are validated — matching createBulkOptions.
private void validateOptions(String[] options, CosmosBulkItemResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
}
// Feed variant with three validation profiles selected by the flags:
//  - isChangeFeed: change-feed option subset ([5],[6],[8],[12],[15])
//  - isReadMany:   readMany option subset
//  - otherwise:    full query/readAllItems option set
// Indices follow optionLabels (0 e2e timeout, 1 consistency, 4 bypassCache,
// 5 throughput group, 6 RU threshold, 7 scanInQuery, 8 excluded regions,
// 9 maxDegreeOfParallelism, 10 maxBufferedItemCount, 11 continuation-token KB limit,
// 12 maxItemCount, 13 queryMetrics, 14 indexMetrics, 15 maxPrefetchPageCount,
// 16 queryName).
// NOTE(review): the parameter is named changedOptions but callers also pass
// initialOptions — the name is historical, the values are just "expected options".
private void validateOptions(String[] changedOptions, FeedResponse<InternalObjectNode> response, boolean isChangeFeed, boolean isReadMany) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
if (isChangeFeed) {
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.getMaxPrefetchPageCount()).isEqualTo(Integer.parseInt(changedOptions[15]));
} else if (isReadMany) {
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(changedOptions[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(changedOptions[1]);
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
} else {
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(changedOptions[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(changedOptions[1]);
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.isScanInQueryEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[7]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getMaxDegreeOfParallelism()).isEqualTo(Integer.parseInt(changedOptions[9]));
assertThat(requestOptions.getMaxBufferedItemCount()).isEqualTo(Integer.parseInt(changedOptions[10]));
assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
assertThat(requestOptions.getQueryNameOrDefault("")).isEqualTo(changedOptions[16]);
}
}
/**
 * Overwrites every option property (keyed by optionLabels) with the value at the
 * same index in {@code values}; the operation policies pick the new values up on
 * their next evaluation.
 */
private void changeProperties(String[] values) {
    int index = 0;
    for (String value : values) {
        prop.setProperty(optionLabels[index++], value);
    }
}
} | class OperationPoliciesTest extends TestSuiteBase {
// Client/container under test; the container is shared across tests in the suite.
private CosmosAsyncClient client;
private CosmosAsyncContainer container;
// Mutable property bag the operation policies read their option values from.
// Tests mutate it via changeProperties(...) to simulate dynamic reconfiguration.
private static final Properties prop = new Properties();
// Property keys — one per overridable request option.
private static final String E2E_TIMEOUT = "timeout.seconds";
private static final String CONSISTENCY_LEVEL = "consistency.level";
private static final String CONTENT_RESPONSE_ON_WRITE = "contentResponseOnWriteEnabled";
private static final String NON_IDEMPOTENT_WRITE_RETRIES = "nonIdempotentWriteRetriesEnabled";
private static final String BYPASS_CACHE = "dedicatedGatewayOptions.bypassCache";
private static final String THROUGHPUT_CONTROL_GROUP_NAME = "throughputControlGroupName";
private static final String REQUEST_CHARGE_THRESHOLD = "diagnosticThresholds.requestChargeThreshold";
private static final String SCAN_IN_QUERY = "scanInQueryEnabled";
private static final String EXCLUDE_REGIONS = "excludeRegions";
private static final String MAX_DEGREE_OF_PARALLELISM = "maxDegreeOfParallelism";
private static final String MAX_BUFFERED_ITEM_COUNT = "maxBufferedItemCount";
private static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_KB = "responseContinuationTokenLimitKb";
private static final String MAX_ITEM_COUNT = "maxItemCount";
private static final String QUERY_METRICS = "queryMetricsEnabled";
private static final String INDEX_METRICS = "indexMetricsEnabled";
private static final String MAX_PREFETCH_PAGE_COUNT = "maxPrefetchPageCount";
private static final String QUERY_NAME = "queryName";
// Ordered key list; index i here corresponds to index i of every String[] options
// array used by the tests and validateOptions(...) overloads.
private static final String[] optionLabels = {E2E_TIMEOUT, CONSISTENCY_LEVEL, CONTENT_RESPONSE_ON_WRITE, NON_IDEMPOTENT_WRITE_RETRIES, BYPASS_CACHE, THROUGHPUT_CONTROL_GROUP_NAME, REQUEST_CHARGE_THRESHOLD, SCAN_IN_QUERY, EXCLUDE_REGIONS, MAX_DEGREE_OF_PARALLELISM, MAX_BUFFERED_ITEM_COUNT, RESPONSE_CONTINUATION_TOKEN_LIMIT_KB, MAX_ITEM_COUNT, QUERY_METRICS, INDEX_METRICS, MAX_PREFETCH_PAGE_COUNT, QUERY_NAME};
// Baseline option values installed by the constructor and restored after each test.
private static final String[] initialOptions = {"20", "Session", "true", "false", "false", "default", "2000", "false", "East US 2", "2", "100", "200", "30", "false", "false", "10", "QueryName"};
@Factory(dataProvider = "clientBuildersWithApplyPolicies")
public OperationPoliciesTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
for (int i = 0; i < optionLabels.length; i++) {
prop.setProperty(optionLabels[i], initialOptions[i]);
}
}
/**
 * Populates {@code cosmosRequestOptions} with the full point-operation option set
 * (e2e timeout, thresholds, consistency, content-response, write retries, dedicated
 * gateway, throughput group, excluded regions) for Create/Read/Replace/Delete/
 * Patch/Upsert operations and transactional batch spans. Other operations are
 * left untouched.
 */
private static void createReadDeleteBatchEtcOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    boolean isPointOperation = operationType.equals("Create")
        || operationType.equals("Read")
        || operationType.equals("Replace")
        || operationType.equals("Delete")
        || operationType.equals("Patch")
        || operationType.equals("Upsert");
    boolean isTransactionalBatch = operationType.equals("Batch") && spanName.contains("transactionalBatch");
    if (!isPointOperation && !isTransactionalBatch) {
        return;
    }
    CosmosEndToEndOperationLatencyPolicyConfig endToEndConfig = new CosmosEndToEndOperationLatencyPolicyConfig(
        true,
        Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
        new ThresholdBasedAvailabilityStrategy());
    cosmosRequestOptions
        .setCosmosEndToEndLatencyPolicyConfig(endToEndConfig)
        .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
        .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)))
        .setContentResponseOnWriteEnabled(Boolean.parseBoolean(prop.getProperty(CONTENT_RESPONSE_ON_WRITE)))
        .setNonIdempotentWriteRetriesEnabled(Boolean.parseBoolean(prop.getProperty(NON_IDEMPOTENT_WRITE_RETRIES)))
        .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
            .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
        .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
}
/**
 * Populates {@code cosmosRequestOptions} with the query option set for Query
 * operations and readAllItems spans; all values are read from the mutable
 * property bag so tests can change them between invocations.
 */
private static void createQueryReadAllItemsOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    boolean isQueryLike = operationType.equals("Query") || spanName.contains("readAllItems");
    if (!isQueryLike) {
        return;
    }
    cosmosRequestOptions
        .setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(
            true,
            Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
            new ThresholdBasedAvailabilityStrategy()))
        .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
        .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
            .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
        .setScanInQueryEnabled(Boolean.parseBoolean(prop.getProperty(SCAN_IN_QUERY)))
        .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
        .setMaxDegreeOfParallelism(Integer.parseInt(prop.getProperty(MAX_DEGREE_OF_PARALLELISM)))
        .setMaxBufferedItemCount(Integer.parseInt(prop.getProperty(MAX_BUFFERED_ITEM_COUNT)))
        .setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
        .setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)))
        .setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
        .setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
        .setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
        .setQueryName(prop.getProperty(QUERY_NAME))
        .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
}
/**
 * Populates {@code cosmosRequestOptions} with the readMany option subset for
 * spans whose name contains "readMany"; other spans are left untouched.
 */
private static void createReadManyOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!spanName.contains("readMany")) {
        return;
    }
    cosmosRequestOptions
        .setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(
            true,
            Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
            new ThresholdBasedAvailabilityStrategy()))
        .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
        .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
            .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
        .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
        .setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
        .setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
        .setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
        .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
}
/**
 * Populates {@code cosmosRequestOptions} for bulk executions (Batch operations
 * whose span is a non-transactional batch): excluded regions and the throughput
 * control group name only.
 */
private static void createBulkOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!operationType.equals("Batch") || !spanName.contains("nonTransactionalBatch")) {
        return;
    }
    cosmosRequestOptions
        .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
}
/**
 * Populates {@code cosmosRequestOptions} with the change-feed option subset for
 * spans whose name contains "queryChangeFeed"; other spans are left untouched.
 */
private static void createChangeFeedOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!spanName.contains("queryChangeFeed")) {
        return;
    }
    cosmosRequestOptions
        .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
        .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
        .setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
        .setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
}
// Factory data provider: builds three client configurations that all apply the same
// option values, exercising (0) a single policy in gateway mode, (1) the same single
// policy in direct mode, and (2) the same logic split across two chained policies.
@DataProvider
public static Object[] clientBuildersWithApplyPolicies() {
// Single policy covering every operation category; each helper only populates
// options when the operation type / span name matches its category.
CosmosOperationPolicy policy = (cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
createReadManyOptions(spanName, cosmosRequestOptions);
createBulkOptions(operationType, spanName, cosmosRequestOptions);
createChangeFeedOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
};
CosmosClientBuilder[] clientBuilders = new CosmosClientBuilder[3];
clientBuilders[0] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode()
.addOperationPolicy(policy);
clientBuilders[1] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.addOperationPolicy(policy);
// Third builder splits the same categories across two policies to verify that
// multiple registered policies compose correctly.
clientBuilders[2] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.addOperationPolicy((cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
createReadManyOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
}).addOperationPolicy((cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createBulkOptions(operationType, spanName, cosmosRequestOptions);
createChangeFeedOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
});
return clientBuilders;
}
// One-time setup per test instance: builds the async client from the factory-supplied
// builder and resolves the shared multi-partition container.
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT)
public void before_OperationPoliciesTest() {
// Guards against double initialization across TestNG lifecycle invocations.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
container = getSharedMultiPartitionCosmosContainer(this.client);
}
// Restores the baseline option values after every test method (and again on class
// teardown — the method intentionally carries both annotations) so one test's
// changeProperties(...) cannot leak into the next.
// NOTE(review): the CosmosAsyncClient built in before_OperationPoliciesTest is never
// closed here — confirm whether the base class tears it down, otherwise this leaks
// one client per @Factory instance.
@AfterClass(groups = {"fast"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
@AfterMethod(alwaysRun = true)
public void afterMethod() {
changeProperties(initialOptions);
}
// Data provider for the "changedOptions" tests: two rows with genuinely different
// option values, plus initialOptions itself to verify the no-op transition also works.
// Index i of each row corresponds to optionLabels[i].
@DataProvider(name = "changedOptions")
private String[][] createChangedOptions() {
return new String[][] {
{ "8", "ConsistentPrefix", "true", "false", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "true", "20", "QueryNameChanged" },
{ "4", "Eventual", "false", "true", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "true", "false", "20", "QueryNameChanged" },
initialOptions
};
}
// Verifies create-item picks up policy-driven options before and after the change,
// including contentResponseOnWriteEnabled (options[2]) controlling whether the
// created item body is echoed back.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void createItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, itemResponse);
validateOptions(initialOptions, itemResponse, false);
changeProperties(changedOptions);
item = getDocumentDefinition(UUID.randomUUID().toString());
itemResponse = container.createItem(item).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
// With contentResponseOnWriteEnabled=false the write response carries no body.
if (changedOptions[2].equals("true")) {
validateItemResponse(item, itemResponse);
} else {
assertThat(BridgeInternal.getProperties(itemResponse)).isNull();
}
validateOptions(changedOptions, itemResponse, false);
}
// Verifies delete-item observes policy-driven options before and after the change;
// 204 No Content confirms the delete succeeded in both passes.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void deleteItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(item).block();
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(item.getId(),
new PartitionKey(item.get("mypk")),
options).block();
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
validateOptions(initialOptions, deleteResponse, false);
changeProperties(changedOptions);
// Re-create the same document so it can be deleted again under the new options.
container.createItem(item).block();
deleteResponse = container.deleteItem(item.getId(),
new PartitionKey(item.get("mypk")),
options).block();
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
validateOptions(changedOptions, deleteResponse, false);
}
// Verifies point-read observes policy-driven options before and after the change.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(item).block();
CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(item.getId(),
new PartitionKey(item.get("mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class).block();
validateItemResponse(item, readResponse);
validateOptions(initialOptions, readResponse, true);
changeProperties(changedOptions);
readResponse = container.readItem(item.getId(),
new PartitionKey(item.get("mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class).block();
validateItemResponse(item, readResponse);
validateOptions(changedOptions, readResponse, true);
}
// Verifies upsert observes policy-driven options before and after the change; the
// second upsert mutates the same document and the response body presence depends
// on contentResponseOnWriteEnabled (changedOptions[2]).
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void upsertItem(String[] changedOptions) throws Throwable {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, upsertResponse);
validateOptions(initialOptions, upsertResponse, false);
changeProperties(changedOptions);
// Add a new property so the second upsert is observably a replace.
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(upsertResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(upsertResponse)).isNull();
}
validateOptions(changedOptions, upsertResponse, false);
}
// Verifies patch observes policy-driven options before and after the change; each
// pass adds/overwrites one property, and the second pass's response body presence
// depends on contentResponseOnWriteEnabled (changedOptions[2]).
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void patchItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(item, new CosmosItemRequestOptions()).block();
validateItemResponse(item, createResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
PartitionKey partitionKey = new PartitionKey(item.get("mypk"));
CosmosPatchOperations patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
CosmosItemResponse<InternalObjectNode> patchResponse = container.patchItem(
item.getId(), partitionKey, patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, patchResponse, false);
changeProperties(changedOptions);
// Second patch under the new options, with a fresh value for the same property.
newPropValue = UUID.randomUUID().toString();
patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
patchResponse = container.patchItem(item.getId(), partitionKey,
patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(patchResponse)).isNull();
}
validateOptions(changedOptions, patchResponse, false);
}
// Verifies replace observes policy-driven options before and after the change; the
// second replace's response body presence depends on contentResponseOnWriteEnabled
// (changedOptions[2]).
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void replaceItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
validateItemResponse(item, itemResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
PartitionKey pk = new PartitionKey(item.get("mypk"));
ModelBridgeInternal.setPartitionKey(options, pk);
CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, replace, false);
changeProperties(changedOptions);
// Second replace under the new options, with a fresh value for the same property.
newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(replace)).isNull();
}
validateOptions(changedOptions, replace, false);
}
// Verifies bulk execution observes the bulk-specific policy options (excluded
// regions, throughput control group) before and after the change.
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void bulk(String[] changedOptions) {
// Cold flux: each subscription regenerates 10 documents with fresh random ids,
// so re-using it for the second execution cannot cause id conflicts.
Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.range(0, 10).map(i -> {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
return CosmosBulkOperations.getCreateItemOperation(item, new PartitionKey(item.get("mypk")));
});
CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
Flux<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> responseFlux = container
.executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
AtomicInteger processedDoc = new AtomicInteger(0);
responseFlux
.flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
processedDoc.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
if (cosmosBulkOperationResponse.getException() != null) {
logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
fail(cosmosBulkOperationResponse.getException().toString());
}
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
validateOptions(initialOptions, cosmosBulkItemResponse);
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(10);
// Second execution after the policy inputs change.
changeProperties(changedOptions);
responseFlux = container
.executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
AtomicInteger processedDoc2 = new AtomicInteger(0);
responseFlux
.flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
processedDoc2.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
if (cosmosBulkOperationResponse.getException() != null) {
logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
fail(cosmosBulkOperationResponse.getException().toString());
}
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
validateOptions(changedOptions, cosmosBulkItemResponse);
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc2.get()).isEqualTo(10);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void batch(String[] changedOptions) {
    // Execute a transactional batch with the baseline options, then mutate the
    // policy-backing properties and execute a second batch, verifying that the
    // operation policy applied the changed options to the second request.
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    InternalObjectNode item2 = getDocumentDefinition(UUID.randomUUID().toString());
    // Both items must share a partition key to be legal in one transactional batch.
    item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
    batch.createItemOperation(item);
    batch.createItemOperation(item2);
    CosmosBatchResponse batchResponse = container.executeCosmosBatch(batch).block();
    assertBatchSucceeded(batchResponse);
    validateOptions(initialOptions, batchResponse);
    changeProperties(changedOptions);
    item = getDocumentDefinition(UUID.randomUUID().toString());
    item2 = getDocumentDefinition(UUID.randomUUID().toString());
    item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
    batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
    batch.createItemOperation(item);
    batch.createItemOperation(item2);
    batchResponse = container.executeCosmosBatch(batch).block();
    assertBatchSucceeded(batchResponse);
    validateOptions(changedOptions, batchResponse);
}
// Shared assertions for a successful two-operation transactional batch.
// Bug fix: the previous inline assertions passed MessageFormat-style '{0}' placeholders
// to AssertJ's as(), which uses String.format semantics, and referenced a third
// argument ({2}) that was never supplied — the description was never interpolated.
private void assertBatchSucceeded(CosmosBatchResponse batchResponse) {
    assertThat(batchResponse).isNotNull();
    assertThat(batchResponse.getStatusCode())
        .as("Batch server response had StatusCode %s instead of %s expected",
            batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
        .isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.size()).isEqualTo(2);
    assertThat(batchResponse.getRequestCharge()).isPositive();
    assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Runs the same single-item query before and after mutating the policy-backing
// properties, checking that each page carries the expected request options.
public void query(String[] changedOptions) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
String query = String.format("SELECT * from c where c.id = '%s'", id);
container.queryItems(query, InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isEqualTo(1);
assertThat(results.get(0).getId()).isEqualTo(id);
// isChangeFeed=false, isReadMany=false -> full query-option validation branch.
validateOptions(initialOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
changeProperties(changedOptions);
container.queryItems(query, InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isEqualTo(1);
assertThat(results.get(0).getId()).isEqualTo(id);
validateOptions(changedOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// readAllItems uses the same policy path as queries (spanName contains "readAllItems"),
// so it is validated with the query-option branch of validateOptions.
public void readAllItems(String[] changedOptions) throws Exception {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
container.readAllItems(InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
// The shared container may contain documents from other tests; only a lower bound holds.
assertThat(results.size()).isGreaterThanOrEqualTo(1);
validateOptions(initialOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
changeProperties(changedOptions);
container.readAllItems(InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isGreaterThanOrEqualTo(1);
validateOptions(changedOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Creates five documents, reads them back in one readMany call before and after the
// option change, and validates via the isReadMany branch of validateOptions.
public void readMany(String[] changedOptions) throws Exception {
List<CosmosItemIdentity> cosmosItemIdentities = new ArrayList<>();
Set<String> idSet = new HashSet<>();
int numDocuments = 5;
for (int i = 0; i < numDocuments; i++) {
InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(document).block();
PartitionKey partitionKey = new PartitionKey(document.get("mypk"));
CosmosItemIdentity cosmosItemIdentity = new CosmosItemIdentity(partitionKey, document.getId());
cosmosItemIdentities.add(cosmosItemIdentity);
idSet.add(document.getId());
}
FeedResponse<InternalObjectNode> feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
assertThat(feedResponse).isNotNull();
assertThat(feedResponse.getResults()).isNotNull();
assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
// Result order is not asserted; membership in the created-id set is sufficient.
for (int i = 0; i < feedResponse.getResults().size(); i++) {
InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
assertThat(idSet.contains(fetchedResult.getId())).isTrue();
}
validateOptions(initialOptions, feedResponse, false, true);
changeProperties(changedOptions);
feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
assertThat(feedResponse).isNotNull();
assertThat(feedResponse.getResults()).isNotNull();
assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
for (int i = 0; i < feedResponse.getResults().size(); i++) {
InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
assertThat(idSet.contains(fetchedResult.getId())).isTrue();
}
validateOptions(changedOptions, feedResponse, false, true);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Drains the change feed from the beginning (capturing the final continuation token),
// mutates the options, inserts another batch of documents, then resumes from the
// token and expects exactly the newly inserted documents.
public void queryChangeFeed(String[] changedOptions) {
int numInserted = 20;
for (int i = 0; i < numInserted; i++) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
}
CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions
.createForProcessingFromBeginning(FeedRange.forFullRange());
Iterator<FeedResponse<InternalObjectNode>> responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
.toIterable().iterator();
String continuationToken = "";
while (responseIterator.hasNext()) {
FeedResponse<InternalObjectNode> response = responseIterator.next();
assertThat(response.getRequestCharge()).isGreaterThan(0);
// Keep only the last page's token so the second pass starts after everything seen so far.
continuationToken = response.getContinuationToken();
validateOptions(initialOptions, response, true, false);
}
changeProperties(changedOptions);
for (int i = 0; i < numInserted; i++) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
}
options = CosmosChangeFeedRequestOptions
.createForProcessingFromContinuation(continuationToken);
responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
.toIterable().iterator();
int totalResults = 0;
while (responseIterator.hasNext()) {
FeedResponse<InternalObjectNode> response = responseIterator.next();
assertThat(response.getRequestCharge()).isGreaterThan(0);
totalResults += response.getResults().size();
// isChangeFeed=true -> only the change-feed-relevant options are validated.
validateOptions(changedOptions, response, true, false);
}
assertThat(totalResults).isEqualTo(numInserted);
}
// Builds a minimal test document: caller-supplied id, a random partition-key
// value under "mypk", and a fixed "sgmts" payload.
private InternalObjectNode getDocumentDefinition(String documentId) {
    String partitionKeyValue = UUID.randomUUID().toString();
    return new InternalObjectNode(String.format(
        "{ \"id\": \"%s\", \"mypk\": \"%s\", \"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}",
        documentId, partitionKeyValue));
}
// Asserts that the response body is present and echoes the id of the document that
// was sent (i.e. content-response-on-write returned the created/read resource).
private void validateItemResponse(InternalObjectNode containerProperties,
CosmosItemResponse<InternalObjectNode> response) {
assertThat(BridgeInternal.getProperties(response).getId()).isNotNull();
assertThat(BridgeInternal.getProperties(response).getId())
.as("check Resource Id")
.isEqualTo(containerProperties.getId());
}
// Validates the request options recorded in the diagnostics context of a point
// operation against the expected positional option values.
// Indices used here: [0]=e2e timeout secs, [1]=consistency level,
// [2]=contentResponseOnWrite, [3]=nonIdempotentWriteRetries, [4]=bypassCache,
// [5]=throughputControlGroupName, [6]=requestChargeThreshold,
// [8]=excludeRegions (comma-separated).
private void validateOptions(String[] options, CosmosItemResponse<?> response, boolean doesRequestLevelConsistencyOverrideMatter) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(options[0]));
// Some operations cannot honor a request-level consistency override, so the check is conditional.
if (doesRequestLevelConsistencyOverrideMatter) {
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(options[1].toUpperCase(Locale.ROOT));
}
assertThat(requestOptions.isContentResponseOnWriteEnabled()).isEqualTo(Boolean.parseBoolean(options[2]));
assertThat(requestOptions.getNonIdempotentWriteRetriesEnabled()).isEqualTo(Boolean.parseBoolean(options[3]));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(options[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
// Validates the subset of options applied to transactional batch requests:
// [1]=consistency level, [6]=requestChargeThreshold, [8]=excludeRegions.
private void validateOptions(String[] options, CosmosBatchResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(options[1].toUpperCase(Locale.ROOT));
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
// Validates the subset of options applied to bulk (non-transactional batch) item
// responses: [8]=excludeRegions, [5]=throughputControlGroupName.
private void validateOptions(String[] options, CosmosBulkItemResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
}
// Validates feed responses, dispatching on the source of the feed:
// change feed checks only the change-feed-relevant options, readMany checks the
// readMany subset, and everything else (query/readAllItems) checks the full
// query option set. Option indices mirror optionLabels.
private void validateOptions(String[] changedOptions, FeedResponse<InternalObjectNode> response, boolean isChangeFeed, boolean isReadMany) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
if (isChangeFeed) {
// Change feed: throughput group, RU threshold, excluded regions, page sizing.
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.getMaxPrefetchPageCount()).isEqualTo(Integer.parseInt(changedOptions[15]));
} else if (isReadMany) {
// readMany: point-read-style options plus continuation limit and metrics flags.
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(changedOptions[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
} else {
// Query / readAllItems: the full query option set, including parallelism,
// buffering, scan-in-query and the query name.
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(changedOptions[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.isScanInQueryEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[7]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getMaxDegreeOfParallelism()).isEqualTo(Integer.parseInt(changedOptions[9]));
assertThat(requestOptions.getMaxBufferedItemCount()).isEqualTo(Integer.parseInt(changedOptions[10]));
assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
assertThat(requestOptions.getQueryNameOrDefault("")).isEqualTo(changedOptions[16]);
}
}
// Overwrites every tracked option property with the supplied values;
// values are positional and must line up with optionLabels.
private void changeProperties(String[] values) {
    int index = 0;
    for (String value : values) {
        prop.setProperty(optionLabels[index++], value);
    }
}
} |
changed | public void before_OperationPoliciesTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client);
container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId());
} | container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); | public void before_OperationPoliciesTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
container = getSharedMultiPartitionCosmosContainer(this.client);
} | class OperationPoliciesTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosAsyncContainer container;
private static final ImplementationBridgeHelpers.CosmosAsyncContainerHelper.CosmosAsyncContainerAccessor containerAccessor
= ImplementationBridgeHelpers.CosmosAsyncContainerHelper.getCosmosAsyncContainerAccessor();
// Mutable backing store read by the operation policies on every request;
// tests change these properties to verify the policies pick up new values.
private static final Properties prop = new Properties();
// Property keys, one per configurable request option.
private static final String E2E_TIMEOUT = "timeout.seconds";
private static final String CONSISTENCY_LEVEL = "consistency.level";
private static final String CONTENT_RESPONSE_ON_WRITE = "contentResponseOnWriteEnabled";
private static final String NON_IDEMPOTENT_WRITE_RETRIES = "nonIdempotentWriteRetriesEnabled";
private static final String BYPASS_CACHE = "dedicatedGatewayOptions.bypassCache";
private static final String THROUGHPUT_CONTROL_GROUP_NAME = "throughputControlGroupName";
private static final String REQUEST_CHARGE_THRESHOLD = "diagnosticThresholds.requestChargeThreshold";
private static final String SCAN_IN_QUERY = "scanInQueryEnabled";
private static final String EXCLUDE_REGIONS = "excludeRegions";
private static final String MAX_DEGREE_OF_PARALLELISM = "maxDegreeOfParallelism";
private static final String MAX_BUFFERED_ITEM_COUNT = "maxBufferedItemCount";
private static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_KB = "responseContinuationTokenLimitKb";
private static final String MAX_ITEM_COUNT = "maxItemCount";
private static final String QUERY_METRICS = "queryMetricsEnabled";
private static final String INDEX_METRICS = "indexMetricsEnabled";
private static final String MAX_PREFETCH_PAGE_COUNT = "maxPrefetchPageCount";
private static final String QUERY_NAME = "queryName";
// Positional order of the labels; every String[] of option values in this class
// (initialOptions, changedOptions) is indexed in this same order.
private static final String[] optionLabels = {E2E_TIMEOUT, CONSISTENCY_LEVEL, CONTENT_RESPONSE_ON_WRITE, NON_IDEMPOTENT_WRITE_RETRIES, BYPASS_CACHE, THROUGHPUT_CONTROL_GROUP_NAME, REQUEST_CHARGE_THRESHOLD, SCAN_IN_QUERY, EXCLUDE_REGIONS, MAX_DEGREE_OF_PARALLELISM, MAX_BUFFERED_ITEM_COUNT, RESPONSE_CONTINUATION_TOKEN_LIMIT_KB, MAX_ITEM_COUNT, QUERY_METRICS, INDEX_METRICS, MAX_PREFETCH_PAGE_COUNT, QUERY_NAME};
// Baseline values restored after each test by afterMethod().
private static final String[] initialOptions = {"20", ConsistencyLevel.STRONG.toString().toUpperCase(), "true", "false", "false", "default", "2000", "false", "East US 2", "2", "100", "200", "30", "true", "true", "10", "QueryName"};
@Factory(dataProvider = "clientBuildersWithApplyPolicies")
// One test-class instance is created per client builder; seeds the shared
// Properties with the baseline values so policies read a known initial state.
public OperationPoliciesTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
for (int i = 0; i < optionLabels.length; i++) {
prop.setProperty(optionLabels[i], initialOptions[i]);
}
}
// Populates cosmosRequestOptions from the current property values for point
// operations (Create/Read/Replace/Delete/Patch/Upsert) and transactional
// batches (operationType "Batch" with a "transactionalBatch" span name).
// No-op for any other operation.
private static void createReadDeleteBatchEtcOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Create") || operationType.equals("Read") || operationType.equals("Replace")
|| operationType.equals("Delete") || operationType.equals("Patch") || operationType.equals("Upsert")
|| (operationType.equals("Batch") && spanName.contains("transactionalBatch"))) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)))
.setContentResponseOnWriteEnabled(Boolean.parseBoolean(prop.getProperty(CONTENT_RESPONSE_ON_WRITE)))
.setNonIdempotentWriteRetriesEnabled(Boolean.parseBoolean(prop.getProperty(NON_IDEMPOTENT_WRITE_RETRIES)))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
}
}
// Populates cosmosRequestOptions from the current property values for query
// operations and readAllItems (matched by span name). Applies the full query
// option set including paging, parallelism and metrics flags.
private static void createQueryReadAllItemsOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Query") || spanName.contains("readAllItems")) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setScanInQueryEnabled(Boolean.parseBoolean(prop.getProperty(SCAN_IN_QUERY)))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
.setMaxDegreeOfParallelism(Integer.parseInt(prop.getProperty(MAX_DEGREE_OF_PARALLELISM)))
.setMaxBufferedItemCount(Integer.parseInt(prop.getProperty(MAX_BUFFERED_ITEM_COUNT)))
.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)))
.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
.setQueryName(prop.getProperty(QUERY_NAME))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)));
}
}
// Populates cosmosRequestOptions for readMany operations (matched by span name):
// point-read-style options plus continuation limit and metrics flags.
private static void createReadManyOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (spanName.contains("readMany")) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)));
}
}
// Populates cosmosRequestOptions for bulk execution, which surfaces as operation
// type "Batch" with a "nonTransactionalBatch" span: only excluded regions and the
// throughput control group apply.
private static void createBulkOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Batch") && spanName.contains("nonTransactionalBatch")) {
cosmosRequestOptions.setExcludeRegions((new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
}
}
// Populates cosmosRequestOptions for change-feed operations (matched by span name):
// regions, throughput group, RU threshold and page sizing.
private static void createChangeFeedOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (spanName.contains("queryChangeFeed")) {
cosmosRequestOptions.setExcludeRegions((new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
}
}
@DataProvider
// Supplies three client builders that exercise the operation-policy API:
// [0] gateway mode + one policy covering every operation kind,
// [1] default (direct) mode + the same single policy,
// [2] default mode + two chained policies that split the option application
//     (point/query/readMany in the first, bulk/change-feed in the second).
public static CosmosClientBuilder[] clientBuildersWithApplyPolicies() {
CosmosOperationPolicy policy = (cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
// Each helper only fills the options when its operation type / span matches.
createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
createReadManyOptions(spanName, cosmosRequestOptions);
createBulkOptions(operationType, spanName, cosmosRequestOptions);
createChangeFeedOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
};
CosmosClientBuilder[] clientBuilders = new CosmosClientBuilder[3];
clientBuilders[0] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode()
.addOperationPolicy(policy);
clientBuilders[1] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.addOperationPolicy(policy);
clientBuilders[2] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.addOperationPolicy((cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
createReadManyOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
}).addOperationPolicy((cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createBulkOptions(operationType, spanName, cosmosRequestOptions);
createChangeFeedOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
});
return clientBuilders;
}
// Closes the client created for this test-class instance.
// Bug fix: a stray @BeforeClass annotation was stacked on this method in addition
// to @AfterClass; running it at suite setup would fail the isNotNull() assertion
// (the client is still null) and close nothing. Only @AfterClass applies here.
@AfterClass(groups = {"fast"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    assertThat(this.client).isNotNull();
    this.client.close();
}
@AfterMethod()
// Restores the policy-backing properties to the baseline so one test's
// changed options never leak into the next test.
public void afterMethod() {
changeProperties(initialOptions);
}
@DataProvider(name = "changedOptions")
// Two distinct option sets plus the baseline itself (the no-op case);
// values are positional, matching optionLabels.
private String[][] createChangedOptions() {
return new String[][] {
{ "8", ConsistencyLevel.SESSION.toString().toUpperCase(), "true", "false", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "false", "20", "QueryNameChanged" },
{ "4", ConsistencyLevel.EVENTUAL.toString().toUpperCase(), "false", "true", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "false", "20", "QueryNameChanged" },
initialOptions
};
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Creates an item before and after the option change; after the change, the
// presence of a response body depends on the contentResponseOnWrite option ([2]).
public void createItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, itemResponse);
validateOptions(initialOptions, itemResponse);
changeProperties(changedOptions);
item = getDocumentDefinition(UUID.randomUUID().toString());
itemResponse = container.createItem(item).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
// contentResponseOnWrite=false means the service returns no resource body.
if (changedOptions[2].equals("true")) {
validateItemResponse(item, itemResponse);
} else {
assertThat(BridgeInternal.getProperties(itemResponse)).isNull();
}
validateOptions(changedOptions, itemResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Deletes an item before and after the option change; delete success is a 204
// and the applied request options are read back from the diagnostics context.
public void deleteItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(item).block();
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
CosmosItemResponse<?> deleteResponse = container.deleteItem(item.getId(),
new PartitionKey(item.get("mypk")),
options).block();
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
validateOptions(initialOptions, deleteResponse);
changeProperties(changedOptions);
// Recreate the same item so the second delete has something to remove.
container.createItem(item).block();
deleteResponse = container.deleteItem(item.getId(),
new PartitionKey(item.get("mypk")),
options).block();
assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
validateOptions(changedOptions, deleteResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Point-reads the same item before and after the option change and validates
// the applied request options on both responses.
public void readItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(item).block();
CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(item.getId(),
new PartitionKey(item.get("mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class).block();
validateItemResponse(item, readResponse);
validateOptions(initialOptions, readResponse);
changeProperties(changedOptions);
readResponse = container.readItem(item.getId(),
new PartitionKey(item.get("mypk")),
new CosmosItemRequestOptions(),
InternalObjectNode.class).block();
validateItemResponse(item, readResponse);
validateOptions(changedOptions, readResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Upserts an item (insert), changes the options, then upserts an updated copy
// (replace); the second response body depends on contentResponseOnWrite ([2]).
public void upsertItem(String[] changedOptions) throws Throwable {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, upsertResponse);
validateOptions(initialOptions, upsertResponse);
changeProperties(changedOptions);
// Add a new property so the second upsert performs an observable replace.
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(upsertResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(upsertResponse)).isNull();
}
validateOptions(changedOptions, upsertResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Patches a new property onto an item before and after the option change; the
// second response body depends on contentResponseOnWrite ([2]).
public void patchItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(item, new CosmosItemRequestOptions()).block();
validateItemResponse(item, createResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
PartitionKey partitionKey = new PartitionKey(item.get("mypk"));
CosmosPatchOperations patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
CosmosItemResponse<InternalObjectNode> patchResponse = container.patchItem(
item.getId(), partitionKey, patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, patchResponse);
changeProperties(changedOptions);
// Second patch writes a fresh value to the same path.
newPropValue = UUID.randomUUID().toString();
patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
patchResponse = container.patchItem(item.getId(), partitionKey,
patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(patchResponse)).isNull();
}
validateOptions(changedOptions, patchResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
// Replaces an item before and after the option change; the second response body
// depends on contentResponseOnWrite ([2]).
public void replaceItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
validateItemResponse(item, itemResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
PartitionKey pk = new PartitionKey(item.get("mypk"));
ModelBridgeInternal.setPartitionKey(options, pk);
CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, replace);
changeProperties(changedOptions);
// Second replace writes a fresh value to the same property.
newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
} else {
assertThat(BridgeInternal.getProperties(replace)).isNull();
}
validateOptions(changedOptions, replace);
}
    // Verifies operation-policy options are applied to bulk (non-transactional batch)
    // operations, before and after the policy-backing properties change.
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void bulk(String[] changedOptions) {
        // The Flux is cold: each subscription re-runs the map and creates 10 fresh
        // documents, which is why it can be reused for the second executeBulkOperations.
        Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.range(0, 10).map(i -> {
            InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
            return CosmosBulkOperations.getCreateItemOperation(item, new PartitionKey(item.get("mypk")));
        });
        CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
        Flux<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> responseFlux = container
            .executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
        AtomicInteger processedDoc = new AtomicInteger(0);
        responseFlux
            .flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
                processedDoc.incrementAndGet();
                CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
                if (cosmosBulkOperationResponse.getException() != null) {
                    logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
                    fail(cosmosBulkOperationResponse.getException().toString());
                }
                assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
                assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
                assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
                assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
                assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
                validateOptions(initialOptions, cosmosBulkItemResponse);
                return Mono.just(cosmosBulkItemResponse);
            }).blockLast();
        assertThat(processedDoc.get()).isEqualTo(10);
        // Change the policy-backing properties and run the same bulk flow again.
        changeProperties(changedOptions);
        responseFlux = container
            .executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
        AtomicInteger processedDoc2 = new AtomicInteger(0);
        responseFlux
            .flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
                processedDoc2.incrementAndGet();
                CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
                if (cosmosBulkOperationResponse.getException() != null) {
                    logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
                    fail(cosmosBulkOperationResponse.getException().toString());
                }
                assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
                assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
                assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
                assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
                assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
                validateOptions(changedOptions, cosmosBulkItemResponse);
                return Mono.just(cosmosBulkItemResponse);
            }).blockLast();
        assertThat(processedDoc2.get()).isEqualTo(10);
    }
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void batch(String[] changedOptions) {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
InternalObjectNode item2 = getDocumentDefinition(UUID.randomUUID().toString());
item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
batch.createItemOperation(item);
batch.createItemOperation(item2);
CosmosBatchResponse batchResponse = container.executeCosmosBatch(batch).block();
assertThat(batchResponse).isNotNull();
assertThat(batchResponse.getStatusCode())
.as("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}",
batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
.isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.size()).isEqualTo(2);
assertThat(batchResponse.getRequestCharge()).isPositive();
assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
validateOptions(initialOptions, batchResponse);
changeProperties(changedOptions);
item = getDocumentDefinition(UUID.randomUUID().toString());
item2 = getDocumentDefinition(UUID.randomUUID().toString());
item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
batch.createItemOperation(item);
batch.createItemOperation(item2);
batchResponse = container.executeCosmosBatch(batch).block();
assertThat(batchResponse).isNotNull();
assertThat(batchResponse.getStatusCode())
.as("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}",
batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
.isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.size()).isEqualTo(2);
assertThat(batchResponse.getRequestCharge()).isPositive();
assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
validateOptions(changedOptions, batchResponse);
}
    // Verifies operation-policy options are applied to query operations, before and
    // after the policy-backing properties change.
    // NOTE(review): timeOut is commented out on this test only, unlike its siblings —
    // confirm whether this was a temporary debugging change.
    @Test(groups = { "fast" }, dataProvider = "changedOptions"/*, timeOut = TIMEOUT*/)
    public void query(String[] changedOptions) {
        String id = UUID.randomUUID().toString();
        container.createItem(getDocumentDefinition(id)).block();
        String query = String.format("SELECT * from c where c.id = '%s'", id);
        container.queryItems(query, InternalObjectNode.class).byPage()
            .flatMap(feedResponse -> {
                List<InternalObjectNode> results = feedResponse.getResults();
                assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(results.size()).isEqualTo(1);
                assertThat(results.get(0).getId()).isEqualTo(id);
                validateOptions(initialOptions, feedResponse, false, false);
                return Flux.empty();
            }).blockLast();
        changeProperties(changedOptions);
        container.queryItems(query, InternalObjectNode.class).byPage()
            .flatMap(feedResponse -> {
                List<InternalObjectNode> results = feedResponse.getResults();
                assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(results.size()).isEqualTo(1);
                assertThat(results.get(0).getId()).isEqualTo(id);
                validateOptions(changedOptions, feedResponse, false, false);
                return Flux.empty();
            }).blockLast();
    }
    // Verifies operation-policy options are applied to readAllItems, before and
    // after the policy-backing properties change.
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void readAllItems(String[] changedOptions) throws Exception {
        String id = UUID.randomUUID().toString();
        container.createItem(getDocumentDefinition(id)).block();
        container.readAllItems(InternalObjectNode.class).byPage()
            .flatMap(feedResponse -> {
                List<InternalObjectNode> results = feedResponse.getResults();
                assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
                // Other tests create documents in the same container, so only a
                // lower bound on the result count is asserted.
                assertThat(results.size()).isGreaterThanOrEqualTo(1);
                validateOptions(initialOptions, feedResponse, false, false);
                return Flux.empty();
            }).blockLast();
        changeProperties(changedOptions);
        container.readAllItems(InternalObjectNode.class).byPage()
            .flatMap(feedResponse -> {
                List<InternalObjectNode> results = feedResponse.getResults();
                assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(results.size()).isGreaterThanOrEqualTo(1);
                validateOptions(changedOptions, feedResponse, false, false);
                return Flux.empty();
            }).blockLast();
    }
    // Verifies operation-policy options are applied to readMany, before and after
    // the policy-backing properties change.
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void readMany(String[] changedOptions) throws Exception {
        List<CosmosItemIdentity> cosmosItemIdentities = new ArrayList<>();
        Set<String> idSet = new HashSet<>();
        int numDocuments = 5;
        // Create the documents to read back and remember their identities/ids.
        for (int i = 0; i < numDocuments; i++) {
            InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
            container.createItem(document).block();
            PartitionKey partitionKey = new PartitionKey(document.get("mypk"));
            CosmosItemIdentity cosmosItemIdentity = new CosmosItemIdentity(partitionKey, document.getId());
            cosmosItemIdentities.add(cosmosItemIdentity);
            idSet.add(document.getId());
        }
        FeedResponse<InternalObjectNode> feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
        assertThat(feedResponse).isNotNull();
        assertThat(feedResponse.getResults()).isNotNull();
        assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
        for (int i = 0; i < feedResponse.getResults().size(); i++) {
            InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
            assertThat(idSet.contains(fetchedResult.getId())).isTrue();
        }
        // Last flag selects the readMany branch inside validateOptions.
        validateOptions(initialOptions, feedResponse, false, true);
        changeProperties(changedOptions);
        feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
        assertThat(feedResponse).isNotNull();
        assertThat(feedResponse.getResults()).isNotNull();
        assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
        for (int i = 0; i < feedResponse.getResults().size(); i++) {
            InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
            assertThat(idSet.contains(fetchedResult.getId())).isTrue();
        }
        validateOptions(changedOptions, feedResponse, false, true);
    }
    // Verifies operation-policy options are applied to change-feed queries: drains
    // the feed from the beginning, then inserts more documents, changes the
    // policy-backing properties, and resumes from the captured continuation token.
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void queryChangeFeed(String[] changedOptions) {
        int numInserted = 20;
        for (int i = 0; i < numInserted; i++) {
            String id = UUID.randomUUID().toString();
            container.createItem(getDocumentDefinition(id)).block();
        }
        CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions
            .createForProcessingFromBeginning(FeedRange.forFullRange());
        Iterator<FeedResponse<InternalObjectNode>> responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
            .toIterable().iterator();
        // Keeps the token of the LAST page; assumes at least one page is returned
        // (the inserts above guarantee feed content), otherwise "" would be used.
        String continuationToken = "";
        while (responseIterator.hasNext()) {
            FeedResponse<InternalObjectNode> response = responseIterator.next();
            assertThat(response.getRequestCharge()).isGreaterThan(0);
            continuationToken = response.getContinuationToken();
            // First flag selects the change-feed branch inside validateOptions.
            validateOptions(initialOptions, response, true, false);
        }
        changeProperties(changedOptions);
        for (int i = 0; i < numInserted; i++) {
            String id = UUID.randomUUID().toString();
            container.createItem(getDocumentDefinition(id)).block();
        }
        options = CosmosChangeFeedRequestOptions
            .createForProcessingFromContinuation(continuationToken);
        responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
            .toIterable().iterator();
        int totalResults = 0;
        while (responseIterator.hasNext()) {
            FeedResponse<InternalObjectNode> response = responseIterator.next();
            assertThat(response.getRequestCharge()).isGreaterThan(0);
            totalResults += response.getResults().size();
            validateOptions(changedOptions, response, true, false);
        }
        // Resuming from the token must see exactly the second wave of inserts.
        assertThat(totalResults).isEqualTo(numInserted);
    }
private InternalObjectNode getDocumentDefinition(String documentId) {
final String uuid = UUID.randomUUID().toString();
final InternalObjectNode properties =
new InternalObjectNode(String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, documentId, uuid));
return properties;
}
private void validateItemResponse(InternalObjectNode containerProperties,
CosmosItemResponse<InternalObjectNode> response) {
assertThat(BridgeInternal.getProperties(response).getId()).isNotNull();
assertThat(BridgeInternal.getProperties(response).getId())
.as("check Resource Id")
.isEqualTo(containerProperties.getId());
}
private void validateOptions(String[] options, CosmosItemResponse<?> response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(options[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(options[1]);
assertThat(requestOptions.isContentResponseOnWriteEnabled()).isEqualTo(Boolean.parseBoolean(options[2]));
assertThat(requestOptions.getNonIdempotentWriteRetriesEnabled()).isEqualTo(Boolean.parseBoolean(options[3]));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(options[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
private void validateOptions(String[] options, CosmosBatchResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(options[1]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
private void validateOptions(String[] options, CosmosBulkItemResponse response) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
}
    // Feed-response overload: validates a different subset of the positional
    // options depending on the operation kind — change feed, readMany, or a
    // regular query/readAll (the final else branch checks the full set).
    private void validateOptions(String[] changedOptions, FeedResponse<InternalObjectNode> response, boolean isChangeFeed, boolean isReadMany) {
        OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
            response.getCosmosDiagnostics().getDiagnosticsContext());
        if (isChangeFeed) {
            // Change feed only supports: throughput group, charge threshold,
            // excluded regions, max item count and max prefetch page count.
            assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
            assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
            assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
            assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
            assertThat(requestOptions.getMaxPrefetchPageCount()).isEqualTo(Integer.parseInt(changedOptions[15]));
        } else if (isReadMany) {
            // readMany: point-read-like options plus continuation limit,
            // max item count and query/index metrics.
            assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
                .isEqualTo(Long.parseLong(changedOptions[0]));
            assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(changedOptions[1]);
            assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
            assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
            assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
            assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
            assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
            assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
            assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
            assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
        } else {
            // Query / readAllItems: the full option set, including scan-in-query,
            // parallelism, buffering and the query name.
            assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
                .isEqualTo(Long.parseLong(changedOptions[0]));
            assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(changedOptions[1]);
            assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
            assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
            assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
            assertThat(requestOptions.isScanInQueryEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[7]));
            assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
            assertThat(requestOptions.getMaxDegreeOfParallelism()).isEqualTo(Integer.parseInt(changedOptions[9]));
            assertThat(requestOptions.getMaxBufferedItemCount()).isEqualTo(Integer.parseInt(changedOptions[10]));
            assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
            assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
            assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
            assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
            assertThat(requestOptions.getQueryNameOrDefault("")).isEqualTo(changedOptions[16]);
        }
    }
private void changeProperties(String[] values) {
for (int i = 0; i < values.length; i++) {
prop.setProperty(optionLabels[i], values[i]);
}
}
} | class OperationPoliciesTest extends TestSuiteBase {
    private CosmosAsyncClient client;
    private CosmosAsyncContainer container;
    // Shared, mutable property bag the operation policies read their values from;
    // tests mutate it via changeProperties(...) between operations.
    private static final Properties prop = new Properties();
    // Property keys, one per configurable request option.
    private static final String E2E_TIMEOUT = "timeout.seconds";
    private static final String CONSISTENCY_LEVEL = "consistency.level";
    private static final String CONTENT_RESPONSE_ON_WRITE = "contentResponseOnWriteEnabled";
    private static final String NON_IDEMPOTENT_WRITE_RETRIES = "nonIdempotentWriteRetriesEnabled";
    private static final String BYPASS_CACHE = "dedicatedGatewayOptions.bypassCache";
    private static final String THROUGHPUT_CONTROL_GROUP_NAME = "throughputControlGroupName";
    private static final String REQUEST_CHARGE_THRESHOLD = "diagnosticThresholds.requestChargeThreshold";
    private static final String SCAN_IN_QUERY = "scanInQueryEnabled";
    private static final String EXCLUDE_REGIONS = "excludeRegions";
    private static final String MAX_DEGREE_OF_PARALLELISM = "maxDegreeOfParallelism";
    private static final String MAX_BUFFERED_ITEM_COUNT = "maxBufferedItemCount";
    private static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_KB = "responseContinuationTokenLimitKb";
    private static final String MAX_ITEM_COUNT = "maxItemCount";
    private static final String QUERY_METRICS = "queryMetricsEnabled";
    private static final String INDEX_METRICS = "indexMetricsEnabled";
    private static final String MAX_PREFETCH_PAGE_COUNT = "maxPrefetchPageCount";
    private static final String QUERY_NAME = "queryName";
    // Positional layout shared by optionLabels, initialOptions and every
    // String[] options/changedOptions array in this class: index i of a values
    // array corresponds to optionLabels[i].
    private static final String[] optionLabels = {E2E_TIMEOUT, CONSISTENCY_LEVEL, CONTENT_RESPONSE_ON_WRITE, NON_IDEMPOTENT_WRITE_RETRIES, BYPASS_CACHE, THROUGHPUT_CONTROL_GROUP_NAME, REQUEST_CHARGE_THRESHOLD, SCAN_IN_QUERY, EXCLUDE_REGIONS, MAX_DEGREE_OF_PARALLELISM, MAX_BUFFERED_ITEM_COUNT, RESPONSE_CONTINUATION_TOKEN_LIMIT_KB, MAX_ITEM_COUNT, QUERY_METRICS, INDEX_METRICS, MAX_PREFETCH_PAGE_COUNT, QUERY_NAME};
    private static final String[] initialOptions = {"20", "Session", "true", "false", "false", "default", "2000", "false", "East US 2", "2", "100", "200", "30", "false", "false", "10", "QueryName"};
    // Seeds the shared property bag with the baseline option values so the
    // operation policies start from a known state for every client builder.
    @Factory(dataProvider = "clientBuildersWithApplyPolicies")
    public OperationPoliciesTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
        for (int i = 0; i < optionLabels.length; i++) {
            prop.setProperty(optionLabels[i], initialOptions[i]);
        }
    }
    // Populates request options for point operations (create/read/replace/delete/
    // patch/upsert) and transactional batch, reading current values from prop.
    // No-op for any other operation type.
    private static void createReadDeleteBatchEtcOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
        if (operationType.equals("Create") || operationType.equals("Read") || operationType.equals("Replace")
            || operationType.equals("Delete") || operationType.equals("Patch") || operationType.equals("Upsert")
            || (operationType.equals("Batch") && spanName.contains("transactionalBatch"))) {
            cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
                    Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
                    new ThresholdBasedAvailabilityStrategy()))
                .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
                .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)))
                .setContentResponseOnWriteEnabled(Boolean.parseBoolean(prop.getProperty(CONTENT_RESPONSE_ON_WRITE)))
                .setNonIdempotentWriteRetriesEnabled(Boolean.parseBoolean(prop.getProperty(NON_IDEMPOTENT_WRITE_RETRIES)))
                .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
                    .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
                .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
                .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
        }
    }
    // Populates the full set of query request options (applies to queries and
    // readAllItems), reading current values from prop. No-op otherwise.
    private static void createQueryReadAllItemsOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
        if (operationType.equals("Query") || spanName.contains("readAllItems")) {
            cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
                    Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
                    new ThresholdBasedAvailabilityStrategy()))
                .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
                .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
                .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
                    .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
                .setScanInQueryEnabled(Boolean.parseBoolean(prop.getProperty(SCAN_IN_QUERY)))
                .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
                .setMaxDegreeOfParallelism(Integer.parseInt(prop.getProperty(MAX_DEGREE_OF_PARALLELISM)))
                .setMaxBufferedItemCount(Integer.parseInt(prop.getProperty(MAX_BUFFERED_ITEM_COUNT)))
                .setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
                .setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)))
                .setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
                .setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
                .setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
                .setQueryName(prop.getProperty(QUERY_NAME))
                .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
        }
    }
    // Populates request options for readMany operations (identified by span name),
    // reading current values from prop. No-op otherwise.
    private static void createReadManyOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
        if (spanName.contains("readMany")) {
            cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
                    Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
                    new ThresholdBasedAvailabilityStrategy()))
                .setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
                .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
                .setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
                    .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
                .setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
                .setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
                .setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
                .setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
                .setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
        }
    }
private static void createBulkOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Batch") && spanName.contains("nonTransactionalBatch")) {
cosmosRequestOptions.setExcludeRegions((new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
}
}
private static void createChangeFeedOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (spanName.contains("queryChangeFeed")) {
cosmosRequestOptions.setExcludeRegions((new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
}
}
    // Supplies three client builders that all apply the same option values but via
    // different policy wiring: [0] gateway mode + one combined policy, [1] default
    // (direct) mode + one combined policy, [2] default mode + two chained policies
    // that split the option categories between them.
    @DataProvider
    public static Object[] clientBuildersWithApplyPolicies() {
        // Single policy that routes to the right option builder based on the
        // operation type / span name recorded in the diagnostics context.
        CosmosOperationPolicy policy = (cosmosOperationDetails) -> {
            CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
            String operationType = cosmosDiagnosticsContext.getOperationType();
            String spanName = cosmosDiagnosticsContext.getSpanName();
            CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
            createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
            createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
            createReadManyOptions(spanName, cosmosRequestOptions);
            createBulkOptions(operationType, spanName, cosmosRequestOptions);
            createChangeFeedOptions(spanName, cosmosRequestOptions);
            cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
        };
        CosmosClientBuilder[] clientBuilders = new CosmosClientBuilder[3];
        clientBuilders[0] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
            .credential(credential)
            .gatewayMode()
            .addOperationPolicy(policy);
        clientBuilders[1] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
            .credential(credential)
            .addOperationPolicy(policy);
        // Third builder exercises multiple policies on one client: the first covers
        // point/query/readMany options, the second bulk and change feed.
        clientBuilders[2] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
            .credential(credential)
            .addOperationPolicy((cosmosOperationDetails) -> {
                CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
                String operationType = cosmosDiagnosticsContext.getOperationType();
                String spanName = cosmosDiagnosticsContext.getSpanName();
                CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
                createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
                createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
                createReadManyOptions(spanName, cosmosRequestOptions);
                cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
            }).addOperationPolicy((cosmosOperationDetails) -> {
                CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
                String operationType = cosmosDiagnosticsContext.getOperationType();
                String spanName = cosmosDiagnosticsContext.getSpanName();
                CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
                createBulkOptions(operationType, spanName, cosmosRequestOptions);
                createChangeFeedOptions(spanName, cosmosRequestOptions);
                cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
            });
        return clientBuilders;
    }
    // NOTE(review): this method carries BOTH @BeforeClass and @AfterClass. It looks
    // like a setup method (creating client/container) belongs between the two
    // annotations and is missing here — confirm against source control before
    // relying on this lifecycle wiring.
    @BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT)
    @AfterClass(groups = {"fast"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }
    // Restores every policy-backing property to its initial value after each test
    // so tests stay independent of execution order.
    @AfterMethod(alwaysRun = true)
    public void afterMethod() {
        changeProperties(initialOptions);
    }
    // Option-value rows, positionally matched to optionLabels. The last row reuses
    // initialOptions as the no-change baseline case.
    @DataProvider(name = "changedOptions")
    private String[][] createChangedOptions() {
        return new String[][] {
            { "8", "ConsistentPrefix", "true", "false", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "true", "20", "QueryNameChanged" },
            { "4", "Eventual", "false", "true", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "true", "false", "20", "QueryNameChanged" },
            initialOptions
        };
    }
    // Verifies operation-policy options are applied to create operations, before
    // and after the policy-backing properties change.
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void createItem(String[] changedOptions) throws Exception {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
        assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
        validateItemResponse(item, itemResponse);
        validateOptions(initialOptions, itemResponse, false);
        changeProperties(changedOptions);
        item = getDocumentDefinition(UUID.randomUUID().toString());
        itemResponse = container.createItem(item).block();
        assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
        // changedOptions[2] is contentResponseOnWriteEnabled: "false" means the
        // service omits the response payload.
        if (changedOptions[2].equals("true")) {
            validateItemResponse(item, itemResponse);
        } else {
            assertThat(BridgeInternal.getProperties(itemResponse)).isNull();
        }
        validateOptions(changedOptions, itemResponse, false);
    }
    // Verifies operation-policy options are applied to delete operations, before
    // and after the policy-backing properties change.
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void deleteItem(String[] changedOptions) throws Exception {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(item).block();
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        CosmosItemResponse<?> deleteResponse = container.deleteItem(item.getId(),
            new PartitionKey(item.get("mypk")),
            options).block();
        assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
        validateOptions(initialOptions, deleteResponse, false);
        changeProperties(changedOptions);
        // Re-create the same item so it can be deleted a second time under the
        // changed option values.
        container.createItem(item).block();
        deleteResponse = container.deleteItem(item.getId(),
            new PartitionKey(item.get("mypk")),
            options).block();
        assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
        validateOptions(changedOptions, deleteResponse, false);
    }
    // Verifies operation-policy options are applied to point reads, before and
    // after the policy-backing properties change.
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void readItem(String[] changedOptions) throws Exception {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(item).block();
        CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(item.getId(),
            new PartitionKey(item.get("mypk")),
            new CosmosItemRequestOptions(),
            InternalObjectNode.class).block();
        validateItemResponse(item, readResponse);
        validateOptions(initialOptions, readResponse, true);
        changeProperties(changedOptions);
        readResponse = container.readItem(item.getId(),
            new PartitionKey(item.get("mypk")),
            new CosmosItemRequestOptions(),
            InternalObjectNode.class).block();
        validateItemResponse(item, readResponse);
        validateOptions(changedOptions, readResponse, true);
    }
/**
 * Verifies that upsert requests pick up operation-policy overrides, including the
 * contentResponseOnWriteEnabled flag (changedOptions[2]): when it is "true" the response
 * payload must contain the newly added property, otherwise the payload must be absent.
 *
 * @param changedOptions property values applied between the two upserts
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void upsertItem(String[] changedOptions) throws Throwable {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
validateItemResponse(item, upsertResponse);
validateOptions(initialOptions, upsertResponse, false);
changeProperties(changedOptions);
// Mutate the document so the second upsert acts as a replace with a verifiable new property.
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(upsertResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
// contentResponseOnWriteEnabled=false: write responses carry no resource payload.
assertThat(BridgeInternal.getProperties(upsertResponse)).isNull();
}
validateOptions(changedOptions, upsertResponse, false);
}
/**
 * Verifies that patch requests pick up operation-policy overrides. Each phase adds/overwrites
 * a property via a patch operation; the response payload presence is governed by
 * contentResponseOnWriteEnabled (changedOptions[2]).
 *
 * @param changedOptions property values applied between the two patches
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void patchItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(item, new CosmosItemRequestOptions()).block();
validateItemResponse(item, createResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
PartitionKey partitionKey = new PartitionKey(item.get("mypk"));
CosmosPatchOperations patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
CosmosItemResponse<InternalObjectNode> patchResponse = container.patchItem(
item.getId(), partitionKey, patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, patchResponse, false);
changeProperties(changedOptions);
// Second patch with a fresh value under the changed configuration.
newPropValue = UUID.randomUUID().toString();
patchOperations = CosmosPatchOperations.create();
patchOperations.add("/" + newPropLabel, newPropValue);
patchResponse = container.patchItem(item.getId(), partitionKey,
patchOperations, InternalObjectNode.class).block();
assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
} else {
// contentResponseOnWriteEnabled=false: no resource payload on the write response.
assertThat(BridgeInternal.getProperties(patchResponse)).isNull();
}
validateOptions(changedOptions, patchResponse, false);
}
/**
 * Verifies that replace requests pick up operation-policy overrides. The item is replaced
 * twice with a changing property value; payload presence on the second replace is governed
 * by contentResponseOnWriteEnabled (changedOptions[2]).
 *
 * @param changedOptions property values applied between the two replaces
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void replaceItem(String[] changedOptions) throws Exception {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
validateItemResponse(item, itemResponse);
String newPropLabel = "newProp";
String newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
PartitionKey pk = new PartitionKey(item.get("mypk"));
ModelBridgeInternal.setPartitionKey(options, pk);
CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
validateOptions(initialOptions, replace, false);
changeProperties(changedOptions);
// Replace again with a fresh value under the changed configuration.
newPropValue = UUID.randomUUID().toString();
item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
replace = container.replaceItem(item,
item.getId(),
pk,
options).block();
if (changedOptions[2].equals("true")) {
assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
} else {
// contentResponseOnWriteEnabled=false: no resource payload on the write response.
assertThat(BridgeInternal.getProperties(replace)).isNull();
}
validateOptions(changedOptions, replace, false);
}
/**
 * Verifies that bulk (non-transactional batch) operations pick up operation-policy overrides.
 * The same cold Flux is subscribed twice; each subscription re-runs the map and therefore
 * produces 10 fresh documents with new random ids, so both executions create successfully.
 *
 * @param changedOptions property values applied between the two bulk executions
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void bulk(String[] changedOptions) {
Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.range(0, 10).map(i -> {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
return CosmosBulkOperations.getCreateItemOperation(item, new PartitionKey(item.get("mypk")));
});
CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
Flux<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> responseFlux = container
.executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
AtomicInteger processedDoc = new AtomicInteger(0);
responseFlux
.flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
processedDoc.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
if (cosmosBulkOperationResponse.getException() != null) {
logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
fail(cosmosBulkOperationResponse.getException().toString());
}
// Each per-item response must be a successful create with usable diagnostics.
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
validateOptions(initialOptions, cosmosBulkItemResponse);
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc.get()).isEqualTo(10);
changeProperties(changedOptions);
// Second subscription of the same cold Flux = 10 new documents under the changed configuration.
responseFlux = container
.executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
AtomicInteger processedDoc2 = new AtomicInteger(0);
responseFlux
.flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
processedDoc2.incrementAndGet();
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
if (cosmosBulkOperationResponse.getException() != null) {
logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
fail(cosmosBulkOperationResponse.getException().toString());
}
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
validateOptions(changedOptions, cosmosBulkItemResponse);
return Mono.just(cosmosBulkItemResponse);
}).blockLast();
assertThat(processedDoc2.get()).isEqualTo(10);
}
/**
 * Verifies that transactional-batch requests pick up operation-policy overrides. Two items
 * sharing one partition key are created in a single batch, once with the initial property
 * values and once after {@code changeProperties(changedOptions)}.
 *
 * Fix: the {@code .as(...)} descriptions previously used MessageFormat-style placeholders
 * ({0}, {1}, {2}) with a missing third argument, but AssertJ's {@code as(String, Object...)}
 * applies String.format semantics, so the description never rendered. Rewritten with %s
 * placeholders matching the supplied arguments.
 *
 * @param changedOptions property values applied between the two batch executions
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void batch(String[] changedOptions) {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
InternalObjectNode item2 = getDocumentDefinition(UUID.randomUUID().toString());
// Both items must share the partition key of the batch.
item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
batch.createItemOperation(item);
batch.createItemOperation(item2);
CosmosBatchResponse batchResponse = container.executeCosmosBatch(batch).block();
assertThat(batchResponse).isNotNull();
assertThat(batchResponse.getStatusCode())
.as("Batch server response had StatusCode %s instead of %s expected",
batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
.isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.size()).isEqualTo(2);
assertThat(batchResponse.getRequestCharge()).isPositive();
assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
validateOptions(initialOptions, batchResponse);
changeProperties(changedOptions);
// Repeat with fresh items under the changed configuration.
item = getDocumentDefinition(UUID.randomUUID().toString());
item2 = getDocumentDefinition(UUID.randomUUID().toString());
item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
batch.createItemOperation(item);
batch.createItemOperation(item2);
batchResponse = container.executeCosmosBatch(batch).block();
assertThat(batchResponse).isNotNull();
assertThat(batchResponse.getStatusCode())
.as("Batch server response had StatusCode %s instead of %s expected",
batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
.isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.size()).isEqualTo(2);
assertThat(batchResponse.getRequestCharge()).isPositive();
assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
validateOptions(changedOptions, batchResponse);
}
/**
 * Verifies that query requests pick up operation-policy overrides: one document is created,
 * then queried by id before and after changeProperties; the single-result page's diagnostics
 * must reflect the active option values.
 *
 * @param changedOptions property values applied between the two queries
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void query(String[] changedOptions) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
String query = String.format("SELECT * from c where c.id = '%s'", id);
container.queryItems(query, InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isEqualTo(1);
assertThat(results.get(0).getId()).isEqualTo(id);
validateOptions(initialOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
changeProperties(changedOptions);
// Same query again; diagnostics must now carry the changed option values.
container.queryItems(query, InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isEqualTo(1);
assertThat(results.get(0).getId()).isEqualTo(id);
validateOptions(changedOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
}
/**
 * Verifies that readAllItems requests pick up operation-policy overrides. The container is
 * shared, so only a lower bound (>= 1 result) is asserted.
 *
 * @param changedOptions property values applied between the two readAllItems calls
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readAllItems(String[] changedOptions) throws Exception {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
container.readAllItems(InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isGreaterThanOrEqualTo(1);
validateOptions(initialOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
changeProperties(changedOptions);
// Same scan again; diagnostics must now carry the changed option values.
container.readAllItems(InternalObjectNode.class).byPage()
.flatMap(feedResponse -> {
List<InternalObjectNode> results = feedResponse.getResults();
assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
assertThat(results.size()).isGreaterThanOrEqualTo(1);
validateOptions(changedOptions, feedResponse, false, false);
return Flux.empty();
}).blockLast();
}
/**
 * Verifies that readMany requests pick up operation-policy overrides: 5 documents are
 * created, then fetched by identity before and after changeProperties; each fetched id must
 * belong to the created set and the feed diagnostics must reflect the active option values.
 *
 * @param changedOptions property values applied between the two readMany calls
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readMany(String[] changedOptions) throws Exception {
List<CosmosItemIdentity> cosmosItemIdentities = new ArrayList<>();
Set<String> idSet = new HashSet<>();
int numDocuments = 5;
for (int i = 0; i < numDocuments; i++) {
InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
container.createItem(document).block();
PartitionKey partitionKey = new PartitionKey(document.get("mypk"));
CosmosItemIdentity cosmosItemIdentity = new CosmosItemIdentity(partitionKey, document.getId());
cosmosItemIdentities.add(cosmosItemIdentity);
idSet.add(document.getId());
}
FeedResponse<InternalObjectNode> feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
assertThat(feedResponse).isNotNull();
assertThat(feedResponse.getResults()).isNotNull();
assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
for (int i = 0; i < feedResponse.getResults().size(); i++) {
InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
assertThat(idSet.contains(fetchedResult.getId())).isTrue();
}
validateOptions(initialOptions, feedResponse, false, true);
changeProperties(changedOptions);
// Same readMany again; diagnostics must now carry the changed option values.
feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
assertThat(feedResponse).isNotNull();
assertThat(feedResponse.getResults()).isNotNull();
assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
for (int i = 0; i < feedResponse.getResults().size(); i++) {
InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
assertThat(idSet.contains(fetchedResult.getId())).isTrue();
}
validateOptions(changedOptions, feedResponse, false, true);
}
/**
 * Verifies that change-feed requests pick up operation-policy overrides. Phase 1 drains the
 * feed from the beginning and keeps the continuation token of the LAST page; phase 2 inserts
 * 20 more documents, resumes from that token under the changed configuration, and must see
 * exactly the newly inserted documents.
 *
 * @param changedOptions property values applied between the two change-feed drains
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void queryChangeFeed(String[] changedOptions) {
int numInserted = 20;
for (int i = 0; i < numInserted; i++) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
}
CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions
.createForProcessingFromBeginning(FeedRange.forFullRange());
Iterator<FeedResponse<InternalObjectNode>> responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
.toIterable().iterator();
String continuationToken = "";
while (responseIterator.hasNext()) {
FeedResponse<InternalObjectNode> response = responseIterator.next();
assertThat(response.getRequestCharge()).isGreaterThan(0);
// Overwritten each page; after the loop it holds the token of the final page.
continuationToken = response.getContinuationToken();
validateOptions(initialOptions, response, true, false);
}
changeProperties(changedOptions);
for (int i = 0; i < numInserted; i++) {
String id = UUID.randomUUID().toString();
container.createItem(getDocumentDefinition(id)).block();
}
// Resume from where phase 1 stopped so only the second batch of inserts is observed.
options = CosmosChangeFeedRequestOptions
.createForProcessingFromContinuation(continuationToken);
responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
.toIterable().iterator();
int totalResults = 0;
while (responseIterator.hasNext()) {
FeedResponse<InternalObjectNode> response = responseIterator.next();
assertThat(response.getRequestCharge()).isGreaterThan(0);
totalResults += response.getResults().size();
validateOptions(changedOptions, response, true, false);
}
assertThat(totalResults).isEqualTo(numInserted);
}
/**
 * Builds a minimal test document with the given id, a random "mypk" partition-key value
 * and a fixed "sgmts" payload.
 *
 * @param documentId the id to embed in the document
 * @return the document as an InternalObjectNode
 */
private InternalObjectNode getDocumentDefinition(String documentId) {
    String partitionKeyValue = UUID.randomUUID().toString();
    String json = String.format(
        "{ \"id\": \"%s\", \"mypk\": \"%s\", \"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}",
        documentId,
        partitionKeyValue);
    return new InternalObjectNode(json);
}
/**
 * Asserts that the resource returned in an item response has a non-null id matching the
 * expected document's id.
 *
 * Improvement: the response properties were previously extracted twice via
 * BridgeInternal.getProperties; the result is now hoisted into a local.
 *
 * @param containerProperties the document that was sent to the service
 * @param response the item response whose payload is validated
 */
private void validateItemResponse(InternalObjectNode containerProperties,
CosmosItemResponse<InternalObjectNode> response) {
    InternalObjectNode responseProperties = BridgeInternal.getProperties(response);
    assertThat(responseProperties.getId()).isNotNull();
    assertThat(responseProperties.getId())
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}
/**
 * Asserts that the request options captured in a point-operation response's diagnostics
 * context match the expected property values.
 * Index mapping (parallel to optionLabels): 0=e2e timeout seconds, 1=consistency level,
 * 2=contentResponseOnWriteEnabled, 3=nonIdempotentWriteRetriesEnabled, 4=bypassCache,
 * 5=throughputControlGroupName, 6=requestChargeThreshold, 8=excludeRegions (comma-separated).
 *
 * @param doesRequestLevelConsistencyOverrideMatter when false, the consistency-level check is skipped
 */
private void validateOptions(String[] options, CosmosItemResponse<?> response, boolean doesRequestLevelConsistencyOverrideMatter) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getDiagnostics().getDiagnosticsContext());
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(options[0]));
if (doesRequestLevelConsistencyOverrideMatter) {
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(options[1].toUpperCase(Locale.ROOT));
}
assertThat(requestOptions.isContentResponseOnWriteEnabled()).isEqualTo(Boolean.parseBoolean(options[2]));
assertThat(requestOptions.getNonIdempotentWriteRetriesEnabled()).isEqualTo(Boolean.parseBoolean(options[3]));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(options[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
}
/**
 * Asserts the subset of request options relevant to transactional batches — consistency
 * level (options[1]), request-charge threshold (options[6]) and excluded regions
 * (options[8], comma-separated) — against the diagnostics context of the batch response.
 */
private void validateOptions(String[] options, CosmosBatchResponse response) {
    OverridableRequestOptions requestOptions =
        ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper
            .getCosmosDiagnosticsContextAccessor()
            .getRequestOptions(response.getDiagnostics().getDiagnosticsContext());
    String actualConsistency = requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT);
    assertThat(actualConsistency).isEqualTo(options[1].toUpperCase(Locale.ROOT));
    float expectedThreshold = Float.parseFloat(options[6]);
    assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(expectedThreshold);
    List<String> expectedExcludedRegions = new ArrayList<>(Arrays.asList(options[8].split(",")));
    assertThat(requestOptions.getExcludedRegions()).isEqualTo(expectedExcludedRegions);
}
/**
 * Asserts the subset of request options relevant to bulk operations — excluded regions
 * (options[8], comma-separated) and throughput-control group name (options[5]) — against
 * the diagnostics context of a per-item bulk response.
 */
private void validateOptions(String[] options, CosmosBulkItemResponse response) {
    OverridableRequestOptions requestOptions =
        ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper
            .getCosmosDiagnosticsContextAccessor()
            .getRequestOptions(response.getCosmosDiagnostics().getDiagnosticsContext());
    List<String> expectedExcludedRegions = new ArrayList<>(Arrays.asList(options[8].split(",")));
    assertThat(requestOptions.getExcludedRegions()).isEqualTo(expectedExcludedRegions);
    assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
}
/**
 * Asserts that the request options captured in a feed response's diagnostics context match
 * the expected property values. Three shapes are covered: change-feed reads (smallest option
 * subset), readMany (no query-execution knobs such as parallelism/buffering), and regular
 * query/readAll (full option set).
 * Index mapping (parallel to optionLabels): 0=e2e timeout, 1=consistency, 4=bypassCache,
 * 5=throughputControlGroupName, 6=requestChargeThreshold, 7=scanInQuery, 8=excludeRegions,
 * 9=maxDegreeOfParallelism, 10=maxBufferedItemCount, 11=responseContinuationTokenLimitKb,
 * 12=maxItemCount, 13=queryMetrics, 14=indexMetrics, 15=maxPrefetchPageCount, 16=queryName.
 */
private void validateOptions(String[] changedOptions, FeedResponse<InternalObjectNode> response, boolean isChangeFeed, boolean isReadMany) {
OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
response.getCosmosDiagnostics().getDiagnosticsContext());
if (isChangeFeed) {
// Change feed: only throughput group, charge threshold, regions, page sizes apply.
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.getMaxPrefetchPageCount()).isEqualTo(Integer.parseInt(changedOptions[15]));
} else if (isReadMany) {
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(changedOptions[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
} else {
// Regular query / readAllItems: the full option set applies, including query-execution knobs.
assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
.isEqualTo(Long.parseLong(changedOptions[0]));
assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
assertThat(requestOptions.isScanInQueryEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[7]));
assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
assertThat(requestOptions.getMaxDegreeOfParallelism()).isEqualTo(Integer.parseInt(changedOptions[9]));
assertThat(requestOptions.getMaxBufferedItemCount()).isEqualTo(Integer.parseInt(changedOptions[10]));
assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
assertThat(requestOptions.getQueryNameOrDefault("")).isEqualTo(changedOptions[16]);
}
}
/**
 * Copies the given values into the shared {@code prop} store, keyed positionally by
 * {@code optionLabels}; the operation policies read these properties on each request.
 *
 * @param values new property values, index-aligned with optionLabels
 */
private void changeProperties(String[] values) {
    int index = 0;
    for (String value : values) {
        prop.setProperty(optionLabels[index], value);
        index++;
    }
}
} |
changed to use safeClose() | public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
} | this.client.close(); | public void afterClass() {
safeClose(client);
} | class OperationPoliciesTest extends TestSuiteBase {
// Client/container under test; created in before_OperationPoliciesTest.
private CosmosAsyncClient client;
private CosmosAsyncContainer container;
private static final ImplementationBridgeHelpers.CosmosAsyncContainerHelper.CosmosAsyncContainerAccessor containerAccessor
= ImplementationBridgeHelpers.CosmosAsyncContainerHelper.getCosmosAsyncContainerAccessor();
// Shared mutable property store that the operation policies read on every request;
// tests mutate it via changeProperties(...) to simulate runtime reconfiguration.
private static final Properties prop = new Properties();
// Property keys, one per configurable request option.
private static final String E2E_TIMEOUT = "timeout.seconds";
private static final String CONSISTENCY_LEVEL = "consistency.level";
private static final String CONTENT_RESPONSE_ON_WRITE = "contentResponseOnWriteEnabled";
private static final String NON_IDEMPOTENT_WRITE_RETRIES = "nonIdempotentWriteRetriesEnabled";
private static final String BYPASS_CACHE = "dedicatedGatewayOptions.bypassCache";
private static final String THROUGHPUT_CONTROL_GROUP_NAME = "throughputControlGroupName";
private static final String REQUEST_CHARGE_THRESHOLD = "diagnosticThresholds.requestChargeThreshold";
private static final String SCAN_IN_QUERY = "scanInQueryEnabled";
private static final String EXCLUDE_REGIONS = "excludeRegions";
private static final String MAX_DEGREE_OF_PARALLELISM = "maxDegreeOfParallelism";
private static final String MAX_BUFFERED_ITEM_COUNT = "maxBufferedItemCount";
private static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_KB = "responseContinuationTokenLimitKb";
private static final String MAX_ITEM_COUNT = "maxItemCount";
private static final String QUERY_METRICS = "queryMetricsEnabled";
private static final String INDEX_METRICS = "indexMetricsEnabled";
private static final String MAX_PREFETCH_PAGE_COUNT = "maxPrefetchPageCount";
private static final String QUERY_NAME = "queryName";
// optionLabels and initialOptions are index-aligned parallel arrays; the validateOptions
// helpers rely on this positional mapping (e.g. index 0 = e2e timeout, 1 = consistency, ...).
private static final String[] optionLabels = {E2E_TIMEOUT, CONSISTENCY_LEVEL, CONTENT_RESPONSE_ON_WRITE, NON_IDEMPOTENT_WRITE_RETRIES, BYPASS_CACHE, THROUGHPUT_CONTROL_GROUP_NAME, REQUEST_CHARGE_THRESHOLD, SCAN_IN_QUERY, EXCLUDE_REGIONS, MAX_DEGREE_OF_PARALLELISM, MAX_BUFFERED_ITEM_COUNT, RESPONSE_CONTINUATION_TOKEN_LIMIT_KB, MAX_ITEM_COUNT, QUERY_METRICS, INDEX_METRICS, MAX_PREFETCH_PAGE_COUNT, QUERY_NAME};
private static final String[] initialOptions = {"20", ConsistencyLevel.STRONG.toString().toUpperCase(), "true", "false", "false", "default", "2000", "false", "East US 2", "2", "100", "200", "30", "true", "true", "10", "QueryName"};
/**
 * TestNG factory constructor: seeds the shared property store with the initial option
 * values so the operation policies have a defined baseline before any test runs.
 */
@Factory(dataProvider = "clientBuildersWithApplyPolicies")
public OperationPoliciesTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
for (int i = 0; i < optionLabels.length; i++) {
prop.setProperty(optionLabels[i], initialOptions[i]);
}
}
/**
 * Populates the request options for point operations (Create/Read/Replace/Delete/Patch/
 * Upsert) and transactional batches from the current values in the shared property store.
 * No-op for any other operation type.
 */
private static void createReadDeleteBatchEtcOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Create") || operationType.equals("Read") || operationType.equals("Replace")
|| operationType.equals("Delete") || operationType.equals("Patch") || operationType.equals("Upsert")
|| (operationType.equals("Batch") && spanName.contains("transactionalBatch"))) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)))
.setContentResponseOnWriteEnabled(Boolean.parseBoolean(prop.getProperty(CONTENT_RESPONSE_ON_WRITE)))
.setNonIdempotentWriteRetriesEnabled(Boolean.parseBoolean(prop.getProperty(NON_IDEMPOTENT_WRITE_RETRIES)))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
}
}
/**
 * Populates the full query option set (execution knobs, metrics flags, page sizes, query
 * name, consistency, etc.) for Query operations and readAllItems spans from the current
 * values in the shared property store. No-op otherwise.
 */
private static void createQueryReadAllItemsOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Query") || spanName.contains("readAllItems")) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setScanInQueryEnabled(Boolean.parseBoolean(prop.getProperty(SCAN_IN_QUERY)))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
.setMaxDegreeOfParallelism(Integer.parseInt(prop.getProperty(MAX_DEGREE_OF_PARALLELISM)))
.setMaxBufferedItemCount(Integer.parseInt(prop.getProperty(MAX_BUFFERED_ITEM_COUNT)))
.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)))
.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
.setQueryName(prop.getProperty(QUERY_NAME))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)));
}
}
/**
 * Populates the readMany-relevant option subset (no parallelism/buffering knobs, which
 * readMany does not use) from the current values in the shared property store. Only applied
 * when the span name identifies a readMany call; no-op otherwise.
 */
private static void createReadManyOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (spanName.contains("readMany")) {
cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
new ThresholdBasedAvailabilityStrategy()))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
.setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))))
.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))))
.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)))
.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)))
.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)))
.setConsistencyLevel(ConsistencyLevel.valueOf(prop.getProperty(CONSISTENCY_LEVEL)));
}
}
/**
 * Populates the bulk-relevant option subset (excluded regions and throughput-control group)
 * from the shared property store. Only applied to Batch operations whose span identifies a
 * non-transactional (bulk) batch; no-op otherwise.
 */
private static void createBulkOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!operationType.equals("Batch") || !spanName.contains("nonTransactionalBatch")) {
        return;
    }
    List<String> excludedRegions = new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")));
    cosmosRequestOptions
        .setExcludeRegions(excludedRegions)
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
}
/**
 * Populates the change-feed option subset (excluded regions, throughput-control group,
 * request-charge threshold, prefetch page count and max item count) from the shared
 * property store. Only applied to queryChangeFeed spans; no-op otherwise.
 */
private static void createChangeFeedOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!spanName.contains("queryChangeFeed")) {
        return;
    }
    List<String> excludedRegions = new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")));
    CosmosDiagnosticsThresholds thresholds = new CosmosDiagnosticsThresholds()
        .setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD)));
    cosmosRequestOptions
        .setExcludeRegions(excludedRegions)
        .setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
        .setThresholds(thresholds)
        .setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
        .setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
}
/**
 * Data provider for the @Factory constructor. Builds three client configurations:
 * [0] gateway mode with a single policy covering all operation shapes,
 * [1] default (direct) mode with the same single policy,
 * [2] default mode with the policy split across two chained policies
 *     (point/query/readMany options in the first, bulk/change-feed options in the second),
 * proving that multiple registered policies compose.
 */
@DataProvider
public static CosmosClientBuilder[] clientBuildersWithApplyPolicies() {
CosmosOperationPolicy policy = (cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
// Each helper only fills options when the operation type / span matches its shape.
createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
createReadManyOptions(spanName, cosmosRequestOptions);
createBulkOptions(operationType, spanName, cosmosRequestOptions);
createChangeFeedOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
};
CosmosClientBuilder[] clientBuilders = new CosmosClientBuilder[3];
clientBuilders[0] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode()
.addOperationPolicy(policy);
clientBuilders[1] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.addOperationPolicy(policy);
clientBuilders[2] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.addOperationPolicy((cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
createReadManyOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
}).addOperationPolicy((cosmosOperationDetails) -> {
CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
String operationType = cosmosDiagnosticsContext.getOperationType();
String spanName = cosmosDiagnosticsContext.getSpanName();
CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
createBulkOptions(operationType, spanName, cosmosRequestOptions);
createChangeFeedOptions(spanName, cosmosRequestOptions);
cosmosOperationDetails.setCommonOptions(cosmosRequestOptions);
});
return clientBuilders;
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT)
public void before_OperationPoliciesTest() {
    // Guard against the framework initializing this instance twice.
    assertThat(this.client).isNull();
    this.client = getClientBuilder().buildAsyncClient();
    // Resolve the shared multi-partition container through this client.
    CosmosAsyncContainer sharedContainer = getSharedMultiPartitionCosmosContainer(this.client);
    String databaseId = sharedContainer.getDatabase().getId();
    container = this.client.getDatabase(databaseId).getContainer(sharedContainer.getId());
}
/**
 * Restores the initial option properties after every test method so values
 * applied via {@link #changeProperties(String[])} do not leak between tests.
 */
@AfterMethod(alwaysRun = true)
public void afterMethod() {
    changeProperties(initialOptions);
}

/**
 * Final cleanup once the class is done: restore the initial options and close
 * the async client created in the {@code @BeforeClass} setup. Previously the
 * {@code @AfterClass} annotation was stacked on the per-method reset and the
 * client was never closed, leaking its connections.
 */
@AfterClass(groups = {"fast"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void after_OperationPoliciesTest() {
    changeProperties(initialOptions);
    // NOTE(review): safeClose is the TestSuiteBase helper used across these
    // suites for client disposal — confirm it is available in this base class.
    safeClose(this.client);
    this.client = null;
}
/**
 * Option value sets applied mid-test. Columns follow {@code optionLabels}
 * positionally; the final entry is {@code initialOptions} so the last
 * iteration verifies that the defaults round-trip cleanly.
 */
@DataProvider(name = "changedOptions")
private String[][] createChangedOptions() {
    String[] sessionVariant = { "8", ConsistencyLevel.SESSION.toString().toUpperCase(), "true", "false", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "false", "20", "QueryNameChanged" };
    String[] eventualVariant = { "4", ConsistencyLevel.EVENTUAL.toString().toUpperCase(), "false", "true", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "false", "20", "QueryNameChanged" };
    return new String[][] { sessionVariant, eventualVariant, initialOptions };
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void createItem(String[] changedOptions) throws Exception {
    // First create runs with the initial (default) request options in effect.
    InternalObjectNode doc = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> response = container.createItem(doc).block();
    assertThat(response.getRequestCharge()).isGreaterThan(0);
    validateItemResponse(doc, response);
    validateOptions(initialOptions, response);

    // Second create runs after the operation policy picks up the new options.
    changeProperties(changedOptions);
    doc = getDocumentDefinition(UUID.randomUUID().toString());
    response = container.createItem(doc).block();
    assertThat(response.getRequestCharge()).isGreaterThan(0);
    if (changedOptions[2].equals("true")) {
        validateItemResponse(doc, response);
    } else {
        // contentResponseOnWriteEnabled=false suppresses the response payload.
        assertThat(BridgeInternal.getProperties(response)).isNull();
    }
    validateOptions(changedOptions, response);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void deleteItem(String[] changedOptions) throws Exception {
    InternalObjectNode doc = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(doc).block();
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
    PartitionKey pk = new PartitionKey(doc.get("mypk"));

    // Delete with the default options first ...
    CosmosItemResponse<?> deleteResponse = container.deleteItem(doc.getId(), pk, requestOptions).block();
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
    validateOptions(initialOptions, deleteResponse);

    // ... then re-create the same document and delete it again once the
    // changed options are in effect.
    changeProperties(changedOptions);
    container.createItem(doc).block();
    deleteResponse = container.deleteItem(doc.getId(), pk, requestOptions).block();
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
    validateOptions(changedOptions, deleteResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readItem(String[] changedOptions) throws Exception {
    InternalObjectNode doc = getDocumentDefinition(UUID.randomUUID().toString());
    container.createItem(doc).block();
    PartitionKey pk = new PartitionKey(doc.get("mypk"));

    // Read once with the initial request options in effect.
    CosmosItemResponse<InternalObjectNode> readResponse =
        container.readItem(doc.getId(), pk, new CosmosItemRequestOptions(), InternalObjectNode.class).block();
    validateItemResponse(doc, readResponse);
    validateOptions(initialOptions, readResponse);

    // Read again after the operation policy sees the changed options.
    changeProperties(changedOptions);
    readResponse =
        container.readItem(doc.getId(), pk, new CosmosItemRequestOptions(), InternalObjectNode.class).block();
    validateItemResponse(doc, readResponse);
    validateOptions(changedOptions, readResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void upsertItem(String[] changedOptions) throws Throwable {
    InternalObjectNode doc = getDocumentDefinition(UUID.randomUUID().toString());

    // Initial upsert (an insert) under the default options.
    CosmosItemResponse<InternalObjectNode> upsertResponse =
        container.upsertItem(doc, new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    validateItemResponse(doc, upsertResponse);
    validateOptions(initialOptions, upsertResponse);

    // Second upsert (a replace) after switching options; add a property so
    // the returned payload can be verified to reflect the update.
    changeProperties(changedOptions);
    String newPropLabel = "newProp";
    String newPropValue = UUID.randomUUID().toString();
    doc.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
    upsertResponse = container.upsertItem(doc, new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    if (changedOptions[2].equals("true")) {
        assertThat(BridgeInternal.getProperties(upsertResponse).get(newPropLabel)).isEqualTo(newPropValue);
    } else {
        // contentResponseOnWriteEnabled=false means no payload comes back.
        assertThat(BridgeInternal.getProperties(upsertResponse)).isNull();
    }
    validateOptions(changedOptions, upsertResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void patchItem(String[] changedOptions) throws Exception {
    InternalObjectNode doc = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> createResponse =
        container.createItem(doc, new CosmosItemRequestOptions()).block();
    validateItemResponse(doc, createResponse);

    String newPropLabel = "newProp";
    String newPropValue = UUID.randomUUID().toString();
    PartitionKey pk = new PartitionKey(doc.get("mypk"));

    // Patch under the initial options and verify the new property landed.
    CosmosPatchOperations addOperation = CosmosPatchOperations.create();
    addOperation.add("/" + newPropLabel, newPropValue);
    CosmosItemResponse<InternalObjectNode> patchResponse =
        container.patchItem(doc.getId(), pk, addOperation, InternalObjectNode.class).block();
    assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
    assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
    validateOptions(initialOptions, patchResponse);

    // Patch again with the changed options and a fresh value.
    changeProperties(changedOptions);
    newPropValue = UUID.randomUUID().toString();
    addOperation = CosmosPatchOperations.create();
    addOperation.add("/" + newPropLabel, newPropValue);
    patchResponse = container.patchItem(doc.getId(), pk, addOperation, InternalObjectNode.class).block();
    assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
    if (changedOptions[2].equals("true")) {
        assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
    } else {
        // contentResponseOnWriteEnabled=false suppresses the response payload.
        assertThat(BridgeInternal.getProperties(patchResponse)).isNull();
    }
    validateOptions(changedOptions, patchResponse);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void replaceItem(String[] changedOptions) throws Exception {
    InternalObjectNode doc = getDocumentDefinition(UUID.randomUUID().toString());
    CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(doc).block();
    validateItemResponse(doc, createResponse);

    String newPropLabel = "newProp";
    String newPropValue = UUID.randomUUID().toString();
    doc.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);

    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
    PartitionKey pk = new PartitionKey(doc.get("mypk"));
    ModelBridgeInternal.setPartitionKey(requestOptions, pk);

    // Replace under the initial options; the payload is returned by default.
    CosmosItemResponse<InternalObjectNode> replaceResponse =
        container.replaceItem(doc, doc.getId(), pk, requestOptions).block();
    assertThat(BridgeInternal.getProperties(replaceResponse).get(newPropLabel)).isEqualTo(newPropValue);
    validateOptions(initialOptions, replaceResponse);

    // Replace again with the changed options and a fresh property value.
    changeProperties(changedOptions);
    newPropValue = UUID.randomUUID().toString();
    doc.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
    replaceResponse = container.replaceItem(doc, doc.getId(), pk, requestOptions).block();
    if (changedOptions[2].equals("true")) {
        assertThat(BridgeInternal.getProperties(replaceResponse).get(newPropLabel)).isEqualTo(newPropValue);
    } else {
        // contentResponseOnWriteEnabled=false suppresses the response payload.
        assertThat(BridgeInternal.getProperties(replaceResponse)).isNull();
    }
    validateOptions(changedOptions, replaceResponse);
}
/**
 * Verifies bulk (non-transactional batch) operations pick up policy-applied
 * options, both before and after the option properties change.
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void bulk(String[] changedOptions) {
    // Each subscription of this Flux generates 10 fresh documents, so it can
    // safely be executed once per option set.
    Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.range(0, 10).map(i -> {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        return CosmosBulkOperations.getCreateItemOperation(item, new PartitionKey(item.get("mypk")));
    });
    CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();

    executeBulkAndValidate(cosmosItemOperationFlux, cosmosBulkExecutionOptions, initialOptions);
    changeProperties(changedOptions);
    executeBulkAndValidate(cosmosItemOperationFlux, cosmosBulkExecutionOptions, changedOptions);
}

/**
 * Executes the given bulk operations and asserts that all 10 items were
 * created successfully with the expected request-level options applied.
 * (Previously this pipeline was duplicated verbatim for both option sets.)
 */
private void executeBulkAndValidate(
    Flux<CosmosItemOperation> cosmosItemOperationFlux,
    CosmosBulkExecutionOptions cosmosBulkExecutionOptions,
    String[] expectedOptions) {

    AtomicInteger processedDoc = new AtomicInteger(0);
    container
        .executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions)
        .flatMap(cosmosBulkOperationResponse -> {
            processedDoc.incrementAndGet();
            CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
            if (cosmosBulkOperationResponse.getException() != null) {
                logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
                fail(cosmosBulkOperationResponse.getException().toString());
            }
            assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
            // isGreaterThan(0) also implies the charge is present, so the
            // former duplicate isNotNull() assertion was dropped.
            assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
            assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
            assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
            validateOptions(expectedOptions, cosmosBulkItemResponse);
            return Mono.just(cosmosBulkItemResponse);
        }).blockLast();
    assertThat(processedDoc.get()).isEqualTo(10);
}
/**
 * Verifies transactional batch operations pick up policy-applied options,
 * both before and after the option properties change.
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void batch(String[] changedOptions) {
    executeBatchAndValidate(initialOptions);
    changeProperties(changedOptions);
    executeBatchAndValidate(changedOptions);
}

/**
 * Creates a two-item transactional batch sharing one partition key, executes
 * it and asserts success plus the expected request-level options.
 * (Previously this code was duplicated verbatim for both option sets.)
 */
private void executeBatchAndValidate(String[] expectedOptions) {
    InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
    InternalObjectNode item2 = getDocumentDefinition(UUID.randomUUID().toString());
    // Both documents must share the partition key for a transactional batch.
    item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
    batch.createItemOperation(item);
    batch.createItemOperation(item2);

    CosmosBatchResponse batchResponse = container.executeCosmosBatch(batch).block();
    assertThat(batchResponse).isNotNull();
    // AssertJ descriptions use String.format, so %s placeholders are required;
    // the previous "{0}"-style MessageFormat placeholders were never
    // substituted (and supplied only 2 args for 3 placeholders).
    assertThat(batchResponse.getStatusCode())
        .as("Batch server response had StatusCode %s instead of %s expected",
            batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
        .isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.size()).isEqualTo(2);
    assertThat(batchResponse.getRequestCharge()).isPositive();
    assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
    validateOptions(expectedOptions, batchResponse);
}
/**
 * Verifies query operations pick up policy-applied options. The
 * {@code timeOut} that had been commented out (presumably a debugging
 * leftover) is restored for consistency with every other test here, so a
 * hung query cannot stall the suite.
 */
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void query(String[] changedOptions) {
    String id = UUID.randomUUID().toString();
    container.createItem(getDocumentDefinition(id)).block();
    String query = String.format("SELECT * from c where c.id = '%s'", id);

    // First execution runs under the initial options.
    container.queryItems(query, InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isEqualTo(1);
            assertThat(results.get(0).getId()).isEqualTo(id);
            validateOptions(initialOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();

    // Second execution runs after switching to the changed options.
    changeProperties(changedOptions);
    container.queryItems(query, InternalObjectNode.class).byPage()
        .flatMap(feedResponse -> {
            List<InternalObjectNode> results = feedResponse.getResults();
            assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
            assertThat(results.size()).isEqualTo(1);
            assertThat(results.get(0).getId()).isEqualTo(id);
            validateOptions(changedOptions, feedResponse, false, false);
            return Flux.empty();
        }).blockLast();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readAllItems(String[] changedOptions) throws Exception {
    String id = UUID.randomUUID().toString();
    container.createItem(getDocumentDefinition(id)).block();

    // Drain all pages under the initial options.
    container.readAllItems(InternalObjectNode.class).byPage()
        .flatMap(page -> {
            assertThat(page.getRequestCharge()).isGreaterThan(0);
            assertThat(page.getResults().size()).isGreaterThanOrEqualTo(1);
            validateOptions(initialOptions, page, false, false);
            return Flux.empty();
        }).blockLast();

    // Drain again once the changed options are in effect.
    changeProperties(changedOptions);
    container.readAllItems(InternalObjectNode.class).byPage()
        .flatMap(page -> {
            assertThat(page.getRequestCharge()).isGreaterThan(0);
            assertThat(page.getResults().size()).isGreaterThanOrEqualTo(1);
            validateOptions(changedOptions, page, false, false);
            return Flux.empty();
        }).blockLast();
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void readMany(String[] changedOptions) throws Exception {
    int numDocuments = 5;
    List<CosmosItemIdentity> cosmosItemIdentities = new ArrayList<>();
    Set<String> expectedIds = new HashSet<>();
    for (int i = 0; i < numDocuments; i++) {
        InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(document).block();
        PartitionKey partitionKey = new PartitionKey(document.get("mypk"));
        cosmosItemIdentities.add(new CosmosItemIdentity(partitionKey, document.getId()));
        expectedIds.add(document.getId());
    }

    // readMany under the initial options.
    FeedResponse<InternalObjectNode> feedResponse =
        container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
    assertThat(feedResponse).isNotNull();
    assertThat(feedResponse.getResults()).isNotNull();
    assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
    for (InternalObjectNode fetchedResult : feedResponse.getResults()) {
        assertThat(expectedIds.contains(fetchedResult.getId())).isTrue();
    }
    validateOptions(initialOptions, feedResponse, false, true);

    // readMany again once the changed options are in effect.
    changeProperties(changedOptions);
    feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
    assertThat(feedResponse).isNotNull();
    assertThat(feedResponse.getResults()).isNotNull();
    assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
    for (InternalObjectNode fetchedResult : feedResponse.getResults()) {
        assertThat(expectedIds.contains(fetchedResult.getId())).isTrue();
    }
    validateOptions(changedOptions, feedResponse, false, true);
}
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void queryChangeFeed(String[] changedOptions) {
    int numInserted = 20;
    for (int i = 0; i < numInserted; i++) {
        container.createItem(getDocumentDefinition(UUID.randomUUID().toString())).block();
    }

    // Drain the change feed from the beginning, capturing the continuation
    // token of the last page.
    CosmosChangeFeedRequestOptions changeFeedOptions = CosmosChangeFeedRequestOptions
        .createForProcessingFromBeginning(FeedRange.forFullRange());
    Iterator<FeedResponse<InternalObjectNode>> pageIterator = container
        .queryChangeFeed(changeFeedOptions, InternalObjectNode.class).byPage()
        .toIterable().iterator();
    String continuationToken = "";
    while (pageIterator.hasNext()) {
        FeedResponse<InternalObjectNode> page = pageIterator.next();
        assertThat(page.getRequestCharge()).isGreaterThan(0);
        continuationToken = page.getContinuationToken();
        validateOptions(initialOptions, page, true, false);
    }

    // Insert another batch, then resume from the continuation with the
    // changed options applied; only the new documents should show up.
    changeProperties(changedOptions);
    for (int i = 0; i < numInserted; i++) {
        container.createItem(getDocumentDefinition(UUID.randomUUID().toString())).block();
    }
    changeFeedOptions = CosmosChangeFeedRequestOptions
        .createForProcessingFromContinuation(continuationToken);
    pageIterator = container
        .queryChangeFeed(changeFeedOptions, InternalObjectNode.class).byPage()
        .toIterable().iterator();
    int totalResults = 0;
    while (pageIterator.hasNext()) {
        FeedResponse<InternalObjectNode> page = pageIterator.next();
        assertThat(page.getRequestCharge()).isGreaterThan(0);
        totalResults += page.getResults().size();
        validateOptions(changedOptions, page, true, false);
    }
    assertThat(totalResults).isEqualTo(numInserted);
}
// Builds a test document with the given id, a random partition-key value
// ("mypk") and a fixed "sgmts" payload.
private InternalObjectNode getDocumentDefinition(String documentId) {
    String partitionKeyValue = UUID.randomUUID().toString();
    String json = String.format(
        "{ \"id\": \"%s\", \"mypk\": \"%s\", \"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}",
        documentId, partitionKeyValue);
    return new InternalObjectNode(json);
}
// Verifies the response payload carries the id of the document that was sent.
private void validateItemResponse(InternalObjectNode containerProperties,
                                  CosmosItemResponse<InternalObjectNode> response) {
    InternalObjectNode responseBody = BridgeInternal.getProperties(response);
    assertThat(responseBody.getId()).isNotNull();
    assertThat(responseBody.getId())
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}
// Asserts that the point-operation request options captured in the response
// diagnostics match the expected positional option values.
private void validateOptions(String[] options, CosmosItemResponse<?> response) {
    OverridableRequestOptions requestOptions =
        ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper
            .getCosmosDiagnosticsContextAccessor()
            .getRequestOptions(response.getDiagnostics().getDiagnosticsContext());

    assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
        .isEqualTo(Long.parseLong(options[0]));
    assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(options[1]);
    assertThat(requestOptions.isContentResponseOnWriteEnabled()).isEqualTo(Boolean.parseBoolean(options[2]));
    assertThat(requestOptions.getNonIdempotentWriteRetriesEnabled()).isEqualTo(Boolean.parseBoolean(options[3]));
    assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed())
        .isEqualTo(Boolean.parseBoolean(options[4]));
    assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
    assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold())
        .isEqualTo(Float.parseFloat(options[6]));
    List<String> expectedRegions = new ArrayList<>(Arrays.asList(options[8].split(",")));
    assertThat(requestOptions.getExcludedRegions()).isEqualTo(expectedRegions);
}
// Asserts that the transactional-batch request options captured in the
// response diagnostics match the expected positional option values.
private void validateOptions(String[] options, CosmosBatchResponse response) {
    OverridableRequestOptions requestOptions =
        ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper
            .getCosmosDiagnosticsContextAccessor()
            .getRequestOptions(response.getDiagnostics().getDiagnosticsContext());

    assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(options[1]);
    assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold())
        .isEqualTo(Float.parseFloat(options[6]));
    List<String> expectedRegions = new ArrayList<>(Arrays.asList(options[8].split(",")));
    assertThat(requestOptions.getExcludedRegions()).isEqualTo(expectedRegions);
}
// Asserts that the bulk request options captured in the item response
// diagnostics match the expected positional option values.
private void validateOptions(String[] options, CosmosBulkItemResponse response) {
    OverridableRequestOptions requestOptions =
        ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper
            .getCosmosDiagnosticsContextAccessor()
            .getRequestOptions(response.getCosmosDiagnostics().getDiagnosticsContext());

    List<String> expectedRegions = new ArrayList<>(Arrays.asList(options[8].split(",")));
    assertThat(requestOptions.getExcludedRegions()).isEqualTo(expectedRegions);
    assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
}
/**
 * Asserts that the feed/query request options captured in the response
 * diagnostics match the expected positional option values. The set of
 * options that can be overridden differs per operation kind, hence the flags.
 */
private void validateOptions(String[] changedOptions, FeedResponse<InternalObjectNode> response, boolean isChangeFeed, boolean isReadMany) {
    OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper
        .getCosmosDiagnosticsContextAccessor()
        .getRequestOptions(response.getCosmosDiagnostics().getDiagnosticsContext());
    if (isChangeFeed) {
        // Change feed supports only a reduced option set.
        assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
        assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
        assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
        assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
        assertThat(requestOptions.getMaxPrefetchPageCount()).isEqualTo(Integer.parseInt(changedOptions[15]));
        return;
    }
    // Options shared by readMany and query/readAllItems (previously
    // duplicated verbatim in both branches).
    validateCommonFeedOptions(changedOptions, requestOptions);
    if (!isReadMany) {
        // query / readAllItems support a few additional options.
        assertThat(requestOptions.isScanInQueryEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[7]));
        assertThat(requestOptions.getMaxDegreeOfParallelism()).isEqualTo(Integer.parseInt(changedOptions[9]));
        assertThat(requestOptions.getMaxBufferedItemCount()).isEqualTo(Integer.parseInt(changedOptions[10]));
        assertThat(requestOptions.getQueryNameOrDefault("")).isEqualTo(changedOptions[16]);
    }
}

// Assertions common to readMany and query/readAllItems responses.
private void validateCommonFeedOptions(String[] changedOptions, OverridableRequestOptions requestOptions) {
    assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
        .isEqualTo(Long.parseLong(changedOptions[0]));
    assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase()).isEqualTo(changedOptions[1]);
    assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
    assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
    assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
    assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
    assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
    assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
    assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
    assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
}
// Writes the given option values into the shared Properties, positionally
// matched against optionLabels.
private void changeProperties(String[] values) {
    int index = 0;
    for (String value : values) {
        prop.setProperty(optionLabels[index++], value);
    }
}
} | class OperationPoliciesTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosAsyncContainer container;
private static final Properties prop = new Properties();
private static final String E2E_TIMEOUT = "timeout.seconds";
private static final String CONSISTENCY_LEVEL = "consistency.level";
private static final String CONTENT_RESPONSE_ON_WRITE = "contentResponseOnWriteEnabled";
private static final String NON_IDEMPOTENT_WRITE_RETRIES = "nonIdempotentWriteRetriesEnabled";
private static final String BYPASS_CACHE = "dedicatedGatewayOptions.bypassCache";
private static final String THROUGHPUT_CONTROL_GROUP_NAME = "throughputControlGroupName";
private static final String REQUEST_CHARGE_THRESHOLD = "diagnosticThresholds.requestChargeThreshold";
private static final String SCAN_IN_QUERY = "scanInQueryEnabled";
private static final String EXCLUDE_REGIONS = "excludeRegions";
private static final String MAX_DEGREE_OF_PARALLELISM = "maxDegreeOfParallelism";
private static final String MAX_BUFFERED_ITEM_COUNT = "maxBufferedItemCount";
private static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_KB = "responseContinuationTokenLimitKb";
private static final String MAX_ITEM_COUNT = "maxItemCount";
private static final String QUERY_METRICS = "queryMetricsEnabled";
private static final String INDEX_METRICS = "indexMetricsEnabled";
private static final String MAX_PREFETCH_PAGE_COUNT = "maxPrefetchPageCount";
private static final String QUERY_NAME = "queryName";
private static final String[] optionLabels = {E2E_TIMEOUT, CONSISTENCY_LEVEL, CONTENT_RESPONSE_ON_WRITE, NON_IDEMPOTENT_WRITE_RETRIES, BYPASS_CACHE, THROUGHPUT_CONTROL_GROUP_NAME, REQUEST_CHARGE_THRESHOLD, SCAN_IN_QUERY, EXCLUDE_REGIONS, MAX_DEGREE_OF_PARALLELISM, MAX_BUFFERED_ITEM_COUNT, RESPONSE_CONTINUATION_TOKEN_LIMIT_KB, MAX_ITEM_COUNT, QUERY_METRICS, INDEX_METRICS, MAX_PREFETCH_PAGE_COUNT, QUERY_NAME};
private static final String[] initialOptions = {"20", "Session", "true", "false", "false", "default", "2000", "false", "East US 2", "2", "100", "200", "30", "false", "false", "10", "QueryName"};
@Factory(dataProvider = "clientBuildersWithApplyPolicies")
public OperationPoliciesTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
for (int i = 0; i < optionLabels.length; i++) {
prop.setProperty(optionLabels[i], initialOptions[i]);
}
}
// Applies the point-operation / transactional-batch option overrides read
// from the shared Properties when the diagnostics context identifies such an
// operation; otherwise leaves the request options untouched.
private static void createReadDeleteBatchEtcOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    boolean isPointOperation =
        Arrays.asList("Create", "Read", "Replace", "Delete", "Patch", "Upsert").contains(operationType);
    boolean isTransactionalBatch =
        operationType.equals("Batch") && spanName.contains("transactionalBatch");
    if (!isPointOperation && !isTransactionalBatch) {
        return;
    }
    cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
        Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
        new ThresholdBasedAvailabilityStrategy()));
    cosmosRequestOptions.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))));
    cosmosRequestOptions.setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
    cosmosRequestOptions.setContentResponseOnWriteEnabled(Boolean.parseBoolean(prop.getProperty(CONTENT_RESPONSE_ON_WRITE)));
    cosmosRequestOptions.setNonIdempotentWriteRetriesEnabled(Boolean.parseBoolean(prop.getProperty(NON_IDEMPOTENT_WRITE_RETRIES)));
    cosmosRequestOptions.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
        .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))));
    cosmosRequestOptions.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
    cosmosRequestOptions.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
}
// Applies the query / readAllItems option overrides read from the shared
// Properties when the diagnostics context identifies such an operation.
private static void createQueryReadAllItemsOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
    boolean isQueryOrReadAll = operationType.equals("Query") || spanName.contains("readAllItems");
    if (!isQueryOrReadAll) {
        return;
    }
    cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
        Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
        new ThresholdBasedAvailabilityStrategy()));
    cosmosRequestOptions.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))));
    cosmosRequestOptions.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
    cosmosRequestOptions.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
        .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))));
    cosmosRequestOptions.setScanInQueryEnabled(Boolean.parseBoolean(prop.getProperty(SCAN_IN_QUERY)));
    cosmosRequestOptions.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
    cosmosRequestOptions.setMaxDegreeOfParallelism(Integer.parseInt(prop.getProperty(MAX_DEGREE_OF_PARALLELISM)));
    cosmosRequestOptions.setMaxBufferedItemCount(Integer.parseInt(prop.getProperty(MAX_BUFFERED_ITEM_COUNT)));
    cosmosRequestOptions.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)));
    cosmosRequestOptions.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
    cosmosRequestOptions.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)));
    cosmosRequestOptions.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)));
    cosmosRequestOptions.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)));
    cosmosRequestOptions.setQueryName(prop.getProperty(QUERY_NAME));
    cosmosRequestOptions.setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
}
// Applies the readMany option overrides read from the shared Properties when
// the span name identifies a readMany operation.
private static void createReadManyOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
    if (!spanName.contains("readMany")) {
        return;
    }
    cosmosRequestOptions.setCosmosEndToEndLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfig(true,
        Duration.ofSeconds(Long.parseLong(prop.getProperty(E2E_TIMEOUT))),
        new ThresholdBasedAvailabilityStrategy()));
    cosmosRequestOptions.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))));
    cosmosRequestOptions.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
    cosmosRequestOptions.setDedicatedGatewayRequestOptions(new DedicatedGatewayRequestOptions()
        .setIntegratedCacheBypassed(Boolean.parseBoolean(prop.getProperty(BYPASS_CACHE))));
    cosmosRequestOptions.setExcludeRegions(new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(","))));
    cosmosRequestOptions.setResponseContinuationTokenLimitInKb(Integer.parseInt(prop.getProperty(RESPONSE_CONTINUATION_TOKEN_LIMIT_KB)));
    cosmosRequestOptions.setQueryMetricsEnabled(Boolean.parseBoolean(prop.getProperty(QUERY_METRICS)));
    cosmosRequestOptions.setIndexMetricsEnabled(Boolean.parseBoolean(prop.getProperty(INDEX_METRICS)));
    cosmosRequestOptions.setConsistencyLevel(ConsistencyLevel.fromServiceSerializedFormat(prop.getProperty(CONSISTENCY_LEVEL)));
}
private static void createBulkOptions(String operationType, String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (operationType.equals("Batch") && spanName.contains("nonTransactionalBatch")) {
cosmosRequestOptions.setExcludeRegions((new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME));
}
}
private static void createChangeFeedOptions(String spanName, CosmosRequestOptions cosmosRequestOptions) {
if (spanName.contains("queryChangeFeed")) {
cosmosRequestOptions.setExcludeRegions((new ArrayList<>(Arrays.asList(prop.getProperty(EXCLUDE_REGIONS).split(",")))))
.setThroughputControlGroupName(prop.getProperty(THROUGHPUT_CONTROL_GROUP_NAME))
.setThresholds(new CosmosDiagnosticsThresholds().setRequestChargeThreshold(Float.parseFloat(prop.getProperty(REQUEST_CHARGE_THRESHOLD))))
.setMaxPrefetchPageCount(Integer.parseInt(prop.getProperty(MAX_PREFETCH_PAGE_COUNT)))
.setMaxItemCount(Integer.parseInt(prop.getProperty(MAX_ITEM_COUNT)));
}
}
    /**
     * Supplies three client builders that all install operation policies which re-read the
     * shared property bag on every call and rebuild the request options from it:
     * [0] gateway mode with a single combined policy, [1] direct (default) mode with the same
     * policy, [2] direct mode with the work split across two chained policies.
     */
    @DataProvider
    public static Object[] clientBuildersWithApplyPolicies() {
        // Single policy covering all operation shapes; which create*Options helpers actually
        // mutate the options depends on the operation type / span name of the current call.
        CosmosOperationPolicy policy = (cosmosOperationDetails) -> {
            CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
            String operationType = cosmosDiagnosticsContext.getOperationType();
            String spanName = cosmosDiagnosticsContext.getSpanName();
            CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
            createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
            createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
            createReadManyOptions(spanName, cosmosRequestOptions);
            createBulkOptions(operationType, spanName, cosmosRequestOptions);
            createChangeFeedOptions(spanName, cosmosRequestOptions);
            cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
        };
        CosmosClientBuilder[] clientBuilders = new CosmosClientBuilder[3];
        clientBuilders[0] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
            .credential(credential)
            .gatewayMode()
            .addOperationPolicy(policy);
        clientBuilders[1] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
            .credential(credential)
            .addOperationPolicy(policy);
        // Same overrides as above, but exercised through two separately registered policies.
        clientBuilders[2] = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
            .credential(credential)
            .addOperationPolicy((cosmosOperationDetails) -> {
                CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
                String operationType = cosmosDiagnosticsContext.getOperationType();
                String spanName = cosmosDiagnosticsContext.getSpanName();
                CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
                createReadDeleteBatchEtcOptions(operationType, spanName, cosmosRequestOptions);
                createQueryReadAllItemsOptions(operationType, spanName, cosmosRequestOptions);
                createReadManyOptions(spanName, cosmosRequestOptions);
                cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
            }).addOperationPolicy((cosmosOperationDetails) -> {
                CosmosDiagnosticsContext cosmosDiagnosticsContext = cosmosOperationDetails.getDiagnosticsContext();
                String operationType = cosmosDiagnosticsContext.getOperationType();
                String spanName = cosmosDiagnosticsContext.getSpanName();
                CosmosRequestOptions cosmosRequestOptions = new CosmosRequestOptions();
                createBulkOptions(operationType, spanName, cosmosRequestOptions);
                createChangeFeedOptions(spanName, cosmosRequestOptions);
                cosmosOperationDetails.setRequestOptions(cosmosRequestOptions);
            });
        return clientBuilders;
    }
    /**
     * Builds the async client from the injected builder (which carries the operation
     * policies under test) and resolves the shared multi-partition container.
     */
    @BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT)
    public void before_OperationPoliciesTest() {
        assertThat(this.client).isNull();
        this.client = getClientBuilder().buildAsyncClient();
        container = getSharedMultiPartitionCosmosContainer(this.client);
    }
    /**
     * Restores the shared property bag to {@code initialOptions} after every test method
     * (and after the class), so one test's property mutations cannot leak into the next.
     */
    @AfterClass(groups = {"fast"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    @AfterMethod(alwaysRun = true)
    public void afterMethod() {
        changeProperties(initialOptions);
    }
    /**
     * Option rows for the tests: two modified sets plus {@code initialOptions} itself
     * (to confirm switching back works). Column order mirrors {@code optionLabels};
     * see the validateOptions overloads for which index maps to which request option.
     */
    @DataProvider(name = "changedOptions")
    private String[][] createChangedOptions() {
        return new String[][] {
            { "8", "ConsistentPrefix", "true", "false", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "false", "true", "20", "QueryNameChanged" },
            { "4", "Eventual", "false", "true", "true", "defaultChanged", "1000", "true", "West US 2", "4", "200", "400", "100", "true", "false", "20", "QueryNameChanged" },
            initialOptions
        };
    }
    /**
     * Creates an item under the initial options, then again after switching the shared
     * property bag to {@code changedOptions}, asserting the request options captured in
     * diagnostics match the active property set each time.
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void createItem(String[] changedOptions) throws Exception {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
        assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
        validateItemResponse(item, itemResponse);
        validateOptions(initialOptions, itemResponse, false);
        changeProperties(changedOptions);
        item = getDocumentDefinition(UUID.randomUUID().toString());
        itemResponse = container.createItem(item).block();
        assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
        // Index 2 drives contentResponseOnWrite: when "false" the write response carries no body.
        if (changedOptions[2].equals("true")) {
            validateItemResponse(item, itemResponse);
        } else {
            assertThat(BridgeInternal.getProperties(itemResponse)).isNull();
        }
        validateOptions(changedOptions, itemResponse, false);
    }
    /**
     * Deletes an item under the initial options, then recreates and deletes it again after
     * switching the shared property bag to {@code changedOptions}, asserting 204 responses
     * and that diagnostics reflect the active property set each time.
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void deleteItem(String[] changedOptions) throws Exception {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(item).block();
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        CosmosItemResponse<?> deleteResponse = container.deleteItem(item.getId(),
            new PartitionKey(item.get("mypk")),
            options).block();
        assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
        validateOptions(initialOptions, deleteResponse, false);
        changeProperties(changedOptions);
        container.createItem(item).block();
        deleteResponse = container.deleteItem(item.getId(),
            new PartitionKey(item.get("mypk")),
            options).block();
        assertThat(deleteResponse.getStatusCode()).isEqualTo(204);
        validateOptions(changedOptions, deleteResponse, false);
    }
    /**
     * Point-reads an item under the initial options and again after switching the shared
     * property bag to {@code changedOptions}. Reads pass {@code true} to validateOptions
     * because the request-level consistency override applies to reads.
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void readItem(String[] changedOptions) throws Exception {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        container.createItem(item).block();
        CosmosItemResponse<InternalObjectNode> readResponse = container.readItem(item.getId(),
            new PartitionKey(item.get("mypk")),
            new CosmosItemRequestOptions(),
            InternalObjectNode.class).block();
        validateItemResponse(item, readResponse);
        validateOptions(initialOptions, readResponse, true);
        changeProperties(changedOptions);
        readResponse = container.readItem(item.getId(),
            new PartitionKey(item.get("mypk")),
            new CosmosItemRequestOptions(),
            InternalObjectNode.class).block();
        validateItemResponse(item, readResponse);
        validateOptions(changedOptions, readResponse, true);
    }
    /**
     * Upserts an item under the initial options, then mutates it and upserts again after
     * switching the shared property bag to {@code changedOptions}, asserting the response
     * body presence follows the contentResponseOnWrite setting (index 2).
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void upsertItem(String[] changedOptions) throws Throwable {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        validateItemResponse(item, upsertResponse);
        validateOptions(initialOptions, upsertResponse, false);
        changeProperties(changedOptions);
        // Add a new property so the second upsert is a real replace with observable content.
        String newPropLabel = "newProp";
        String newPropValue = UUID.randomUUID().toString();
        item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
        upsertResponse = container.upsertItem(item, new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        if (changedOptions[2].equals("true")) {
            assertThat(BridgeInternal.getProperties(upsertResponse).get(newPropLabel)).isEqualTo(newPropValue);
        } else {
            assertThat(BridgeInternal.getProperties(upsertResponse)).isNull();
        }
        validateOptions(changedOptions, upsertResponse, false);
    }
    /**
     * Patches an item (adding a property) under the initial options, then patches it again
     * with a new value after switching the shared property bag to {@code changedOptions},
     * asserting the response body presence follows the contentResponseOnWrite setting.
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void patchItem(String[] changedOptions) throws Exception {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(item, new CosmosItemRequestOptions()).block();
        validateItemResponse(item, createResponse);
        String newPropLabel = "newProp";
        String newPropValue = UUID.randomUUID().toString();
        PartitionKey partitionKey = new PartitionKey(item.get("mypk"));
        CosmosPatchOperations patchOperations = CosmosPatchOperations.create();
        patchOperations.add("/" + newPropLabel, newPropValue);
        CosmosItemResponse<InternalObjectNode> patchResponse = container.patchItem(
            item.getId(), partitionKey, patchOperations, InternalObjectNode.class).block();
        assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
        validateOptions(initialOptions, patchResponse, false);
        changeProperties(changedOptions);
        // Second patch overwrites the same path with a fresh value under the changed options.
        newPropValue = UUID.randomUUID().toString();
        patchOperations = CosmosPatchOperations.create();
        patchOperations.add("/" + newPropLabel, newPropValue);
        patchResponse = container.patchItem(item.getId(), partitionKey,
            patchOperations, InternalObjectNode.class).block();
        assertThat(patchResponse.getRequestCharge()).isGreaterThan(0);
        if (changedOptions[2].equals("true")) {
            assertThat(BridgeInternal.getProperties(patchResponse).get(newPropLabel)).isEqualTo(newPropValue);
        } else {
            assertThat(BridgeInternal.getProperties(patchResponse)).isNull();
        }
        validateOptions(changedOptions, patchResponse, false);
    }
    /**
     * Replaces an item (with a new property value) under the initial options, then replaces
     * again after switching the shared property bag to {@code changedOptions}, asserting the
     * response body presence follows the contentResponseOnWrite setting (index 2).
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void replaceItem(String[] changedOptions) throws Exception {
        InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
        CosmosItemResponse<InternalObjectNode> itemResponse = container.createItem(item).block();
        validateItemResponse(item, itemResponse);
        String newPropLabel = "newProp";
        String newPropValue = UUID.randomUUID().toString();
        item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        PartitionKey pk = new PartitionKey(item.get("mypk"));
        ModelBridgeInternal.setPartitionKey(options, pk);
        CosmosItemResponse<InternalObjectNode> replace = container.replaceItem(item,
            item.getId(),
            pk,
            options).block();
        assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
        validateOptions(initialOptions, replace, false);
        changeProperties(changedOptions);
        newPropValue = UUID.randomUUID().toString();
        item.set(newPropLabel, newPropValue, CosmosItemSerializer.DEFAULT_SERIALIZER);
        replace = container.replaceItem(item,
            item.getId(),
            pk,
            options).block();
        if (changedOptions[2].equals("true")) {
            assertThat(BridgeInternal.getProperties(replace).get(newPropLabel)).isEqualTo(newPropValue);
        } else {
            assertThat(BridgeInternal.getProperties(replace)).isNull();
        }
        validateOptions(changedOptions, replace, false);
    }
    /**
     * Executes a 10-item bulk create twice — once under the initial options and once after
     * switching the shared property bag to {@code changedOptions} — asserting every per-item
     * response is 201 and that the request options captured in diagnostics reflect the
     * active property set.
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void bulk(String[] changedOptions) {
        // Cold flux: each subscription re-runs range().map() and generates 10 fresh documents,
        // so the same flux can be executed again after the property change.
        Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.range(0, 10).map(i -> {
            InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
            return CosmosBulkOperations.getCreateItemOperation(item, new PartitionKey(item.get("mypk")));
        });
        CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions();
        Flux<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> responseFlux = container
            .executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
        AtomicInteger processedDoc = new AtomicInteger(0);
        responseFlux
            .flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
                processedDoc.incrementAndGet();
                CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
                if (cosmosBulkOperationResponse.getException() != null) {
                    logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
                    fail(cosmosBulkOperationResponse.getException().toString());
                }
                assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
                assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
                assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
                assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
                assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
                validateOptions(initialOptions, cosmosBulkItemResponse);
                return Mono.just(cosmosBulkItemResponse);
            }).blockLast();
        assertThat(processedDoc.get()).isEqualTo(10);
        changeProperties(changedOptions);
        // Re-subscribe: the operation policies should now observe the changed properties.
        responseFlux = container
            .executeBulkOperations(cosmosItemOperationFlux, cosmosBulkExecutionOptions);
        AtomicInteger processedDoc2 = new AtomicInteger(0);
        responseFlux
            .flatMap((CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse) -> {
                processedDoc2.incrementAndGet();
                CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();
                if (cosmosBulkOperationResponse.getException() != null) {
                    logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException());
                    fail(cosmosBulkOperationResponse.getException().toString());
                }
                assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
                assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
                assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
                assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
                assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
                validateOptions(changedOptions, cosmosBulkItemResponse);
                return Mono.just(cosmosBulkItemResponse);
            }).blockLast();
        assertThat(processedDoc2.get()).isEqualTo(10);
    }
@Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
public void batch(String[] changedOptions) {
InternalObjectNode item = getDocumentDefinition(UUID.randomUUID().toString());
InternalObjectNode item2 = getDocumentDefinition(UUID.randomUUID().toString());
item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
batch.createItemOperation(item);
batch.createItemOperation(item2);
CosmosBatchResponse batchResponse = container.executeCosmosBatch(batch).block();
assertThat(batchResponse).isNotNull();
assertThat(batchResponse.getStatusCode())
.as("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}",
batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
.isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.size()).isEqualTo(2);
assertThat(batchResponse.getRequestCharge()).isPositive();
assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
validateOptions(initialOptions, batchResponse);
changeProperties(changedOptions);
item = getDocumentDefinition(UUID.randomUUID().toString());
item2 = getDocumentDefinition(UUID.randomUUID().toString());
item2.set("mypk", item.get("mypk"), CosmosItemSerializer.DEFAULT_SERIALIZER);
batch = CosmosBatch.createCosmosBatch(new PartitionKey(item.get("mypk")));
batch.createItemOperation(item);
batch.createItemOperation(item2);
batchResponse = container.executeCosmosBatch(batch).block();
assertThat(batchResponse).isNotNull();
assertThat(batchResponse.getStatusCode())
.as("Batch server response had StatusCode {0} instead of {1} expected and had ErrorMessage {2}",
batchResponse.getStatusCode(), HttpResponseStatus.OK.code())
.isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.size()).isEqualTo(2);
assertThat(batchResponse.getRequestCharge()).isPositive();
assertThat(batchResponse.getDiagnostics().toString()).isNotEmpty();
validateOptions(changedOptions, batchResponse);
}
    /**
     * Runs the same single-item id query before and after switching the shared property bag
     * to {@code changedOptions}, asserting one matching result per page pass and that the
     * query request options in diagnostics reflect the active property set.
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void query(String[] changedOptions) {
        String id = UUID.randomUUID().toString();
        container.createItem(getDocumentDefinition(id)).block();
        String query = String.format("SELECT * from c where c.id = '%s'", id);
        container.queryItems(query, InternalObjectNode.class).byPage()
            .flatMap(feedResponse -> {
                List<InternalObjectNode> results = feedResponse.getResults();
                assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(results.size()).isEqualTo(1);
                assertThat(results.get(0).getId()).isEqualTo(id);
                validateOptions(initialOptions, feedResponse, false, false);
                return Flux.empty();
            }).blockLast();
        changeProperties(changedOptions);
        container.queryItems(query, InternalObjectNode.class).byPage()
            .flatMap(feedResponse -> {
                List<InternalObjectNode> results = feedResponse.getResults();
                assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(results.size()).isEqualTo(1);
                assertThat(results.get(0).getId()).isEqualTo(id);
                validateOptions(changedOptions, feedResponse, false, false);
                return Flux.empty();
            }).blockLast();
    }
    /**
     * Reads all items before and after switching the shared property bag to
     * {@code changedOptions}, asserting at least one result per page and that the
     * query request options in diagnostics reflect the active property set.
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void readAllItems(String[] changedOptions) throws Exception {
        String id = UUID.randomUUID().toString();
        container.createItem(getDocumentDefinition(id)).block();
        container.readAllItems(InternalObjectNode.class).byPage()
            .flatMap(feedResponse -> {
                List<InternalObjectNode> results = feedResponse.getResults();
                assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(results.size()).isGreaterThanOrEqualTo(1);
                validateOptions(initialOptions, feedResponse, false, false);
                return Flux.empty();
            }).blockLast();
        changeProperties(changedOptions);
        container.readAllItems(InternalObjectNode.class).byPage()
            .flatMap(feedResponse -> {
                List<InternalObjectNode> results = feedResponse.getResults();
                assertThat(feedResponse.getRequestCharge()).isGreaterThan(0);
                assertThat(results.size()).isGreaterThanOrEqualTo(1);
                validateOptions(changedOptions, feedResponse, false, false);
                return Flux.empty();
            }).blockLast();
    }
    /**
     * Issues a readMany over five freshly created item identities, before and after
     * switching the shared property bag to {@code changedOptions}, asserting all five
     * documents return and that diagnostics reflect the active property set
     * (validated via the readMany branch of validateOptions).
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void readMany(String[] changedOptions) throws Exception {
        List<CosmosItemIdentity> cosmosItemIdentities = new ArrayList<>();
        Set<String> idSet = new HashSet<>();
        int numDocuments = 5;
        for (int i = 0; i < numDocuments; i++) {
            InternalObjectNode document = getDocumentDefinition(UUID.randomUUID().toString());
            container.createItem(document).block();
            PartitionKey partitionKey = new PartitionKey(document.get("mypk"));
            CosmosItemIdentity cosmosItemIdentity = new CosmosItemIdentity(partitionKey, document.getId());
            cosmosItemIdentities.add(cosmosItemIdentity);
            idSet.add(document.getId());
        }
        FeedResponse<InternalObjectNode> feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
        assertThat(feedResponse).isNotNull();
        assertThat(feedResponse.getResults()).isNotNull();
        assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
        for (int i = 0; i < feedResponse.getResults().size(); i++) {
            InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
            assertThat(idSet.contains(fetchedResult.getId())).isTrue();
        }
        validateOptions(initialOptions, feedResponse, false, true);
        changeProperties(changedOptions);
        feedResponse = container.readMany(cosmosItemIdentities, InternalObjectNode.class).block();
        assertThat(feedResponse).isNotNull();
        assertThat(feedResponse.getResults()).isNotNull();
        assertThat(feedResponse.getResults().size()).isEqualTo(numDocuments);
        for (int i = 0; i < feedResponse.getResults().size(); i++) {
            InternalObjectNode fetchedResult = feedResponse.getResults().get(i);
            assertThat(idSet.contains(fetchedResult.getId())).isTrue();
        }
        validateOptions(changedOptions, feedResponse, false, true);
    }
    /**
     * Reads the change feed from the beginning under the initial options, captures the
     * continuation token, inserts more items, switches the shared property bag to
     * {@code changedOptions}, then resumes from the token and asserts exactly the newly
     * inserted items are observed with the changed options in diagnostics.
     */
    @Test(groups = { "fast" }, dataProvider = "changedOptions", timeOut = TIMEOUT)
    public void queryChangeFeed(String[] changedOptions) {
        int numInserted = 20;
        for (int i = 0; i < numInserted; i++) {
            String id = UUID.randomUUID().toString();
            container.createItem(getDocumentDefinition(id)).block();
        }
        CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions
            .createForProcessingFromBeginning(FeedRange.forFullRange());
        Iterator<FeedResponse<InternalObjectNode>> responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
            .toIterable().iterator();
        // Keep the last continuation token so the second pass only sees new changes.
        String continuationToken = "";
        while (responseIterator.hasNext()) {
            FeedResponse<InternalObjectNode> response = responseIterator.next();
            assertThat(response.getRequestCharge()).isGreaterThan(0);
            continuationToken = response.getContinuationToken();
            validateOptions(initialOptions, response, true, false);
        }
        changeProperties(changedOptions);
        for (int i = 0; i < numInserted; i++) {
            String id = UUID.randomUUID().toString();
            container.createItem(getDocumentDefinition(id)).block();
        }
        options = CosmosChangeFeedRequestOptions
            .createForProcessingFromContinuation(continuationToken);
        responseIterator = container.queryChangeFeed(options, InternalObjectNode.class).byPage()
            .toIterable().iterator();
        int totalResults = 0;
        while (responseIterator.hasNext()) {
            FeedResponse<InternalObjectNode> response = responseIterator.next();
            assertThat(response.getRequestCharge()).isGreaterThan(0);
            totalResults += response.getResults().size();
            validateOptions(changedOptions, response, true, false);
        }
        assertThat(totalResults).isEqualTo(numInserted);
    }
private InternalObjectNode getDocumentDefinition(String documentId) {
final String uuid = UUID.randomUUID().toString();
final InternalObjectNode properties =
new InternalObjectNode(String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, documentId, uuid));
return properties;
}
private void validateItemResponse(InternalObjectNode containerProperties,
CosmosItemResponse<InternalObjectNode> response) {
assertThat(BridgeInternal.getProperties(response).getId()).isNotNull();
assertThat(BridgeInternal.getProperties(response).getId())
.as("check Resource Id")
.isEqualTo(containerProperties.getId());
}
    /**
     * Asserts the request options recorded in the item response's diagnostics context match
     * the given option row. Column mapping (per optionLabels): [0] E2E timeout seconds,
     * [1] consistency level, [2] contentResponseOnWrite, [3] non-idempotent write retries,
     * [4] integrated cache bypass, [5] throughput control group, [6] request-charge
     * threshold, [8] excluded regions (comma-separated).
     *
     * @param doesRequestLevelConsistencyOverrideMatter true for reads, where the
     *        request-level consistency override is applied and must be checked
     */
    private void validateOptions(String[] options, CosmosItemResponse<?> response, boolean doesRequestLevelConsistencyOverrideMatter) {
        OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
            response.getDiagnostics().getDiagnosticsContext());
        assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
            .isEqualTo(Long.parseLong(options[0]));
        if (doesRequestLevelConsistencyOverrideMatter) {
            assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(options[1].toUpperCase(Locale.ROOT));
        }
        assertThat(requestOptions.isContentResponseOnWriteEnabled()).isEqualTo(Boolean.parseBoolean(options[2]));
        assertThat(requestOptions.getNonIdempotentWriteRetriesEnabled()).isEqualTo(Boolean.parseBoolean(options[3]));
        assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(options[4]));
        assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
        assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
        assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
    }
    /**
     * Asserts the batch response's diagnostics carry the expected consistency level
     * (options[1]), request-charge threshold (options[6]), and excluded regions (options[8]).
     */
    private void validateOptions(String[] options, CosmosBatchResponse response) {
        OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
            response.getDiagnostics().getDiagnosticsContext());
        assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(options[1].toUpperCase(Locale.ROOT));
        assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(options[6]));
        assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
    }
    /**
     * Asserts the bulk item response's diagnostics carry the expected excluded regions
     * (options[8]) and throughput control group name (options[5]).
     */
    private void validateOptions(String[] options, CosmosBulkItemResponse response) {
        OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
            response.getCosmosDiagnostics().getDiagnosticsContext());
        assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(options[8].split(","))));
        assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(options[5]);
    }
    /**
     * Asserts the feed response's diagnostics carry the request options expected for the
     * given option row, with three branches: change-feed checks the change-feed subset,
     * readMany checks the readMany subset, and the default branch covers the full query
     * option set. Column mapping mirrors optionLabels: [0] E2E timeout seconds,
     * [1] consistency, [4] cache bypass, [5] throughput control group, [6] charge
     * threshold, [7] scan-in-query, [8] excluded regions, [9] max parallelism,
     * [10] max buffered items, [11] continuation-token KB limit, [12] max item count,
     * [13] query metrics, [14] index metrics, [15] max prefetch pages, [16] query name.
     */
    private void validateOptions(String[] changedOptions, FeedResponse<InternalObjectNode> response, boolean isChangeFeed, boolean isReadMany) {
        OverridableRequestOptions requestOptions = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor().getRequestOptions(
            response.getCosmosDiagnostics().getDiagnosticsContext());
        if (isChangeFeed) {
            // Change feed: only the options set by createChangeFeedOptions apply.
            assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
            assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
            assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
            assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
            assertThat(requestOptions.getMaxPrefetchPageCount()).isEqualTo(Integer.parseInt(changedOptions[15]));
        } else if (isReadMany) {
            // readMany: the subset set by createReadManyOptions applies.
            assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
                .isEqualTo(Long.parseLong(changedOptions[0]));
            assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
            assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
            assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
            assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
            assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
            assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
            assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
            assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
            assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
        } else {
            // Query / readAllItems: full query option set.
            assertThat(requestOptions.getCosmosEndToEndLatencyPolicyConfig().getEndToEndOperationTimeout().getSeconds())
                .isEqualTo(Long.parseLong(changedOptions[0]));
            assertThat(requestOptions.getConsistencyLevel().toString().toUpperCase(Locale.ROOT)).isEqualTo(changedOptions[1].toUpperCase(Locale.ROOT));
            assertThat(requestOptions.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()).isEqualTo(Boolean.parseBoolean(changedOptions[4]));
            assertThat(requestOptions.getThroughputControlGroupName()).isEqualTo(changedOptions[5]);
            assertThat(requestOptions.getDiagnosticsThresholds().getRequestChargeThreshold()).isEqualTo(Float.parseFloat(changedOptions[6]));
            assertThat(requestOptions.isScanInQueryEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[7]));
            assertThat(requestOptions.getExcludedRegions()).isEqualTo(new ArrayList<>(Arrays.asList(changedOptions[8].split(","))));
            assertThat(requestOptions.getMaxDegreeOfParallelism()).isEqualTo(Integer.parseInt(changedOptions[9]));
            assertThat(requestOptions.getMaxBufferedItemCount()).isEqualTo(Integer.parseInt(changedOptions[10]));
            assertThat(requestOptions.getResponseContinuationTokenLimitInKb()).isEqualTo(Integer.parseInt(changedOptions[11]));
            assertThat(requestOptions.getMaxItemCount()).isEqualTo(Integer.parseInt(changedOptions[12]));
            assertThat(requestOptions.isQueryMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[13]));
            assertThat(requestOptions.isIndexMetricsEnabled()).isEqualTo(Boolean.parseBoolean(changedOptions[14]));
            assertThat(requestOptions.getQueryNameOrDefault("")).isEqualTo(changedOptions[16]);
        }
    }
private void changeProperties(String[] values) {
for (int i = 0; i < values.length; i++) {
prop.setProperty(optionLabels[i], values[i]);
}
}
} |
null check | public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
this.requestPolicies.add(policy);
return this;
} | this.requestPolicies.add(policy); | public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
checkNotNull(policy, "Argument 'policy' must not be null.");
this.requestPolicies.add(policy);
return this;
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
private Configs configs = new Configs();
private String serviceEndpoint;
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
private List<String> preferredRegions;
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
private Boolean clientTelemetryEnabledOverride = null;
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
 * Instantiates a new Cosmos client builder with defaults: direct connection
 * mode, empty user-agent suffix, default throttling-retry and telemetry
 * configuration, and the environment-configured non-idempotent write retry
 * policy.
 */
public CosmosClientBuilder() {
    this.requestPolicies = new LinkedList<>();
    this.userAgentSuffix = "";
    this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
    this.throttlingRetryOptions = new ThrottlingRetryOptions();
    this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
    // Seed the write retry policy from system configuration (falls back to DISABLED).
    this.resetNonIdempotentWriteRetryPolicy();
}
// Package-private: seeds this builder with a previously captured metadata-cache snapshot.
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
    state = metadataCachesSnapshot;
    return this;
}
// Package-private accessor for the metadata-cache snapshot (may be null).
CosmosClientMetadataCachesSnapshot metadataCaches() {
    return state;
}
/**
* Sets a {@code boolean} flag to reduce the frequency of retries when the client
* strives to meet Session Consistency guarantees for operations
* that can be scoped to a single logical partition. Read your writes for a given logical partition
* should see higher stickiness to regions where the logical partition was written to prior or saw requests in
* thus reducing unnecessary cross-region retries. Reduction of retries would reduce CPU utilization spikes on VMs
* where the client is deployed along with latency savings through reduction of cross-region calls.
*
* <p>
* DISCLAIMER: Setting the {@link CosmosClientBuilder
* will impact all operations executed through this instance of the client provided that
* both the operation and the account support multi-region writes.
* </p>
* <p>
* Setting {@link CosmosClientBuilder
* ensure to maintain a singleton instance of {@link CosmosClient} or {@link CosmosAsyncClient}.
* </p>
*
* Operations supported:
* <ul>
* <li>Read</li>
* <li>Create</li>
* <li>Upsert</li>
* <li>Delete</li>
* <li>Replace</li>
* <li>Batch</li>
* <li>Patch</li>
* <li>Query when scoped to a single logical partition by specifying {@code PartitionKey} with {@link com.azure.cosmos.models.CosmosQueryRequestOptions}</li>
* <li>Change feed when scoped to a single logical partition by using {@code FeedRange.forLogicalPartition()} with {@link com.azure.cosmos.models.CosmosChangeFeedRequestOptions}</li>
* </ul>
*
* <p>
* NOTE: Bulk operations are not supported.
* </p>
*
* @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
* @return current {@link CosmosClientBuilder}
* */
CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
    // Package-private opt-in flag; see the Javadoc above for supported operations and caveats.
    this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
    return this;
}
/**
 * Gets the flag indicating whether region-scoped session capturing is enabled.
 *
 * @return {@code true} when region-scoped session capturing is enabled.
 */
boolean isRegionScopedSessionCapturingEnabled() {
    return isRegionScopedSessionCapturingEnabled;
}
/**
 * Sets an apiType for the builder.
 *
 * @param apiType the API type to associate with clients built from this builder (may be null).
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder setApiType(ApiType apiType){
    this.apiType = apiType;
    return this;
}
/**
 * Returns a read-only view of the {@link CosmosOperationPolicy} instances
 * registered on this builder. The previous Javadoc incorrectly described this
 * method as adding a policy; it only reads the registered policies.
 *
 * @return an unmodifiable list of the registered operation policies
 */
List<CosmosOperationPolicy> getOperationPolicies() {
    return UnmodifiableList.unmodifiableList(this.requestPolicies);
}
/**
 * Returns apiType for the Builder.
 *
 * @return the configured ApiType, or {@code null} when none was set.
 */
ApiType apiType(){ return this.apiType; }
/**
 * Enables session capturing even when the client is not configured with
 * Session consistency. Session capturing is enabled by default for Session
 * consistency; for other consistency levels it is only needed when requests
 * are occasionally sent with Session consistency. Enabling this override for
 * a client already in Session mode has no effect.
 *
 * @param sessionCapturingOverrideEnabled session capturing override
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
    this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
    return this;
}
/**
 * Indicates if session capturing is enabled for non-Session consistency modes.
 * The default is {@code false}.
 *
 * @return the session capturing override
 */
boolean isSessionCapturingOverrideEnabled() {
    return sessionCapturingOverrideEnabled;
}
/**
* Enables connections sharing across multiple Cosmos Clients. The default is false.
* <br/>
* <br/>
* <pre>
* {@code
* CosmosAsyncClient client1 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint1)
* .key(key1)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
* CosmosAsyncClient client2 = new CosmosClientBuilder()
* .endpoint(serviceEndpoint2)
* .key(key2)
* .consistencyLevel(ConsistencyLevel.SESSION)
* .connectionSharingAcrossClientsEnabled(true)
* .buildAsyncClient();
*
*
* }
* </pre>
* <br/>
* When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
* enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
* <br/>
* Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout
* config) of the first instantiated client will be used for all other client instances.
* <br/>
* @param connectionSharingAcrossClientsEnabled connection sharing
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
    // Opt-in: allows Direct-mode connection sharing across client instances in the
    // same JVM (see the Javadoc above for configuration-precedence caveats).
    this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
    return this;
}
/**
 * Indicates whether Direct-mode connection sharing across Cosmos client
 * instances in the same JVM is enabled. The default is {@code false}.
 *
 * @return the connection sharing across multiple clients
 */
boolean isConnectionSharingAcrossClientsEnabled() {
    return connectionSharingAcrossClientsEnabled;
}
/**
 * Gets the token resolver, if one was configured.
 *
 * @return the token resolver (may be null)
 */
CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
    return this.cosmosAuthorizationTokenResolver;
}
/**
 * Sets the token resolver. Mutually exclusive with key, resource-token,
 * credential and permission based authentication: setting it clears those.
 *
 * @param cosmosAuthorizationTokenResolver the token resolver; must not be null
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder authorizationTokenResolver(
    CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
    this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
        "'cosmosAuthorizationTokenResolver' cannot be null.");
    // Only one authentication mechanism may be active at a time.
    this.tokenCredential = null;
    this.permissions = null;
    this.credential = null;
    this.keyOrResourceToken = null;
    return this;
}
/**
 * Gets the Azure Cosmos DB endpoint the SDK will connect to.
 *
 * @return the endpoint
 */
String getEndpoint() {
    return this.serviceEndpoint;
}
/**
 * Sets the Azure Cosmos DB endpoint the SDK will connect to.
 *
 * @param endpoint the service endpoint; must not be null
 * @return current Builder
 */
@Override
public CosmosClientBuilder endpoint(String endpoint) {
    this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    return this;
}
/**
 * Gets either a master or readonly key used to perform authentication
 * for accessing resource.
 *
 * @return the key
 */
String getKey() {
    return this.keyOrResourceToken;
}
/**
 * Sets either a master or readonly key used to perform authentication for
 * accessing resource. Clears any previously configured alternative
 * authentication mechanism (resolver, credentials, permissions).
 *
 * @param key master or readonly key; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder key(String key) {
    this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
    // Only one authentication mechanism may be active at a time.
    this.tokenCredential = null;
    this.permissions = null;
    this.credential = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Gets a resource token used to perform authentication for accessing resource.
 *
 * @return the resourceToken
 */
String getResourceToken() {
    return this.keyOrResourceToken;
}
/**
 * Sets a resource token used to perform authentication for accessing resource.
 * Clears any previously configured alternative authentication mechanism.
 *
 * @param resourceToken resourceToken for authentication; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder resourceToken(String resourceToken) {
    this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
    // Only one authentication mechanism may be active at a time.
    this.tokenCredential = null;
    this.permissions = null;
    this.credential = null;
    this.cosmosAuthorizationTokenResolver = null;
    return this;
}
/**
 * Gets a token credential instance used to perform authentication
 * for accessing resource.
 *
 * @return the token credential (may be null)
 */
TokenCredential getTokenCredential() {
    return this.tokenCredential;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the
 * service. Refer to the Azure SDK for Java identity documentation for proper
 * usage of the {@link TokenCredential} type. Clears any previously configured
 * alternative authentication mechanism.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated CosmosClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Only one authentication mechanism may be active at a time.
    this.permissions = null;
    this.credential = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.keyOrResourceToken = null;
    return this;
}
/**
 * Gets the permission list, which contains the resource tokens needed to
 * access resources.
 *
 * @return the permission list (may be null)
 */
List<CosmosPermissionProperties> getPermissions() {
    return this.permissions;
}
/**
 * Sets the permission list, which contains the resource tokens needed to
 * access resources. Clears any previously configured alternative
 * authentication mechanism.
 *
 * @param permissions Permission list for authentication; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
    this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
    // Only one authentication mechanism may be active at a time.
    this.tokenCredential = null;
    this.credential = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.keyOrResourceToken = null;
    return this;
}
/**
 * Gets the {@link ConsistencyLevel} to be used.
 *
 * @return the consistency level, or {@code null} when none was explicitly set.
 */
ConsistencyLevel getConsistencyLevel() {
    return desiredConsistencyLevel;
}
/**
 * Sets the {@link ConsistencyLevel} to be used.
 *
 * @param desiredConsistencyLevel {@link ConsistencyLevel} (may be null to use the default)
 * @return current Builder
 */
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
    this.desiredConsistencyLevel = desiredConsistencyLevel;
    return this;
}
/**
 * Gets the {@link ConnectionPolicy} to be used.
 *
 * @return the connection policy
 */
ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/**
 * Gets the {@link AzureKeyCredential} to be used.
 *
 * @return {@link AzureKeyCredential} (may be null)
 */
AzureKeyCredential getCredential() {
    return this.credential;
}
/**
 * Gets the {@link CosmosContainerProactiveInitConfig} to be used.
 *
 * @return {@link CosmosContainerProactiveInitConfig} (may be null)
 */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
    return this.proactiveContainerInitConfig;
}
/**
 * Sets the {@link AzureKeyCredential} to be used. Clears any previously
 * configured alternative authentication mechanism.
 *
 * @param credential {@link AzureKeyCredential}; must not be null
 * @return current cosmosClientBuilder
 */
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
    this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
    // Only one authentication mechanism may be active at a time.
    this.tokenCredential = null;
    this.permissions = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.keyOrResourceToken = null;
    return this;
}
/**
 * Gets the boolean which indicates whether the payload is included in Cosmos DB
 * responses for Create, Update and Delete operations on CosmosItem. When false
 * (the default), the service omits the payload, reducing network and CPU load.
 *
 * @return a boolean indicating whether payload will be included in the response or not
 */
boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/**
 * Sets whether Cosmos DB responses include the payload for Create, Update and
 * Delete operations on CosmosItem. When false (the default), the service omits
 * the payload, reducing network and CPU load; RU usage is unaffected.
 *
 * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
    this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
    return this;
}
/**
 * Sets the default GATEWAY connection configuration to be used.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode() {
    // Equivalent to gatewayMode(GatewayConnectionConfig.getDefaultConfig()).
    return this.gatewayMode(GatewayConnectionConfig.getDefaultConfig());
}
/**
 * Sets the GATEWAY connection configuration to be used.
 *
 * @param gatewayConnectionConfig gateway connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
 * Sets the default DIRECT connection configuration to be used.
 * By default, the builder is initialized with directMode().
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode() {
    // Equivalent to directMode(DirectConnectionConfig.getDefaultConfig()).
    return this.directMode(DirectConnectionConfig.getDefaultConfig());
}
/**
 * Sets the DIRECT connection configuration to be used.
 * By default, the builder is initialized with directMode().
 *
 * @param directConnectionConfig direct connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
    this.directConnectionConfig = directConnectionConfig;
    return this;
}
/**
 * Sets the DIRECT connection configuration together with a gateway
 * configuration. Even in direct connection mode, some metadata operations go
 * through the gateway client; supplying a gateway config here tunes that
 * client without changing the connection mode (which remains Direct).
 *
 * @param directConnectionConfig direct connection configuration to be used
 * @param gatewayConnectionConfig gateway connection configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    this.directConnectionConfig = directConnectionConfig;
    return this;
}
/**
 * Sets the value of the user-agent suffix.
 *
 * @param userAgentSuffix The value to be appended to the user-agent header,
 * used for monitoring purposes.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
    this.userAgentSuffix = userAgentSuffix;
    return this;
}
/**
 * Sets the retry policy options associated with the DocumentClient instance.
 * Optional; when not set, the SDK uses its default retry policy configuration.
 * See ThrottlingRetryOptions for details.
 *
 * @param throttlingRetryOptions the RetryOptions instance.
 * @return current CosmosClientBuilder
 * @throws IllegalArgumentException thrown if an error occurs
 */
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
    this.throttlingRetryOptions = throttlingRetryOptions;
    return this;
}
/**
 * Sets the preferred regions for geo-replicated database accounts, e.g.
 * "East US". When endpoint discovery is enabled and this list is non-empty,
 * the SDK prefers the listed regions in order; when endpoint discovery is
 * disabled, this property is ignored.
 *
 * @param preferredRegions the list of preferred regions.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
    this.preferredRegions = preferredRegions;
    return this;
}
/**
 * Sets the flag to enable endpoint discovery for geo-replicated database
 * accounts. When enabled (the default), the SDK automatically discovers the
 * current write and read regions so requests are routed correctly based on
 * region capability and user preference.
 *
 * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
    this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
    return this;
}
/**
 * Sets the flag to enable writes on any region for geo-replicated database
 * accounts. When true (the default), the SDK directs writes to available
 * writable regions (ordered by preferred regions); this has no effect unless
 * EnableMultipleWriteRegions is also enabled on the DatabaseAccount.
 *
 * @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
 * database accounts.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
    this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
    return this;
}
/**
 * Sets the flag to enable client telemetry, which periodically collects
 * database-operation aggregates and system information (cpu/memory) and sends
 * them to the Cosmos monitoring service. Opt-in; DEFAULT is false.
 *
 * @param clientTelemetryEnabled flag to enable client telemetry.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
    // Stored as a nullable override so "never set" is distinguishable from false.
    this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
    return this;
}
/**
 * Sets whether reads may go to multiple regions configured on the account.
 * DEFAULT is true. When unset, the effective default is true for all
 * consistency levels other than Bounded Staleness (false for Bounded
 * Staleness), provided the account has more than one region.
 *
 * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
 * Azure Cosmos DB service.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
    this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
    return this;
}
/**
* Enables automatic retries for write operations even when the SDK can't
* guarantee that they are idempotent. This is the default behavior for the entire Cosmos client - the policy can be
* overridden for individual operations in the request options.
* <br/>>
* NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
* Upsert and Delete operations. It can be overridden on per-request base in the request options. For patch
* operations by default (unless overridden in the request options) retries are always disabled by default.
* <br/>
* - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
* to create a document that the initial attempt successfully created. When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
* <br/>
* - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when a
* replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
* initial attempt successfully updated (causing the etag to change). When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
* <br/>
* - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
* initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
* handle 404 - Not Found.
* <br/>
* - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
* document has been created by the initial attempt - so logically within the same operation. This will only
* impact applications who have special casing for 201 vs. 200 for upsert operations.
* <br/>
* Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
* being executed and the precondition filters being used. Before enabling write retries for patch this needs
* to be carefully reviewed and tests - which is wht retries for patch can only be enabled on request options
* - any CosmosClient wide configuration will be ignored.
* <br/>
* Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
* @param options the options controlling whether non-idempotent write operations should be retried and whether
* trackingIds can be used.
* @return the CosmosItemRequestOptions
*/
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
    checkNotNull(options, "Argument 'options' must not be null.");
    // Map the two option flags onto the internal tri-state write retry policy.
    if (!options.isEnabled()) {
        this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
    } else {
        this.writeRetryPolicy = options.isTrackingIdUsed()
            ? WriteRetryPolicy.WITH_TRACKING_ID
            : WriteRetryPolicy.WITH_RETRIES;
    }
    return this;
}
// Package-private accessor for the effective non-idempotent write retry policy.
WriteRetryPolicy getNonIdempotentWriteRetryPolicy() {
    return writeRetryPolicy;
}
void resetNonIdempotentWriteRetryPolicy()
{
    // Honor the configuration override when it names a known policy; an unset,
    // unrecognized, or explicit "NO_RETRIES" value all resolve to DISABLED.
    WriteRetryPolicy resolved = WriteRetryPolicy.DISABLED;
    String writePolicyName = Configs.getNonIdempotentWriteRetryPolicy();
    if ("WITH_TRACKING_ID".equalsIgnoreCase(writePolicyName)) {
        resolved = WriteRetryPolicy.WITH_TRACKING_ID;
    } else if ("WITH_RETRIES".equalsIgnoreCase(writePolicyName)) {
        resolved = WriteRetryPolicy.WITH_RETRIES;
    }
    this.writeRetryPolicy = resolved;
}
void resetSessionCapturingType() {
    // Allow session-capturing mode to be driven by configuration. An empty /
    // unset value leaves the current flag untouched; anything other than
    // REGION_SCOPED explicitly disables region-scoped capturing.
    String sessionCapturingType = Configs.getSessionCapturingType();
    if (StringUtils.isEmpty(sessionCapturingType)) {
        return;
    }
    if ("REGION_SCOPED".equalsIgnoreCase(sessionCapturingType)) {
        logger.info("Session capturing type is set to REGION_SCOPED");
        this.isRegionScopedSessionCapturingEnabled = true;
    } else {
        logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
        this.isRegionScopedSessionCapturingEnabled = false;
    }
}
/**
* Sets the {@link CosmosContainerProactiveInitConfig} which enable warming up of caches and connections
* associated with containers obtained from {@link CosmosContainerProactiveInitConfig
* obtained from the first <em>k</em> preferred regions where <em>k</em> evaluates to {@link CosmosContainerProactiveInitConfig
*
* <p>
* Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate {@link CosmosContainerProactiveInitConfig} class
* </p>
* @param proactiveContainerInitConfig which encapsulates a list of container identities and no of
* proactive connection regions
* @return current CosmosClientBuilder
* */
public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    // Stored here; the actual warm-up is triggered during buildAsyncClient().
    this.proactiveContainerInitConfig = proactiveContainerInitConfig;
    return this;
}
/**
* Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client
* @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig){
    // Client-wide default; may be overridden per request via request options.
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    return this;
}
/**
* Sets the {@link SessionRetryOptions} instance on the client.
* <p>
* This setting helps in optimizing retry behavior associated with
* {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} or {@code 404 / 1002} scenarios which happen
* when the targeted consistency used by the request is <i>Session Consistency</i> and a
* request goes to a region that does not have recent enough data which the
* request is looking for.
* <p>
* DISCLAIMER: Setting {@link SessionRetryOptions} will modify retry behavior
* for all operations or workloads executed through this instance of the client.
* <p>
* For multi-write accounts:
* <ul>
* <li>
* For a read request going to a local read region, it is possible to optimize
* availability by having the request be retried on a different write region since
* the other write region might have more upto date data.
* </li>
* <li>
* For a read request going to a local write region, it could help to
* switch to a different write region right away provided the local write region
* does not have the most up to date data.
* </li>
* <li>
* For a write request going to a local write region, it could help to
* switch to a different write region right away provided the local write region
* does not have the most up to date data.
* </li>
* </ul>
* For single-write accounts:
* <ul>
* <li>
* If a read request goes to a local read region, it helps to switch to the write region quicker.
* </li>
* <li>
* If a read request goes to a write region, the {@link SessionRetryOptions} setting does not
* matter since the write region in a single-write account has the most up to date data.
* </li>
* <li>
* For a write to a write region in a single-write account, {@code READ_SESSION_NOT_AVAILABLE} errors
* do not apply since the write-region always has the most recent version of the data
* and all writes go to the primary replica in this region. Therefore, replication lags causing errors
* is not applicable here.
* </li>
* </ul>
* About region switch hints:
* <ul>
* <li>In order to prioritize the local region for retries, use the hint {@link CosmosRegionSwitchHint
* <li>In order to move retries to a different / remote region quicker, use the hint {@link CosmosRegionSwitchHint
* </ul>
* Operations supported:
* <ul>
* <li>Read</li>
* <li>Query</li>
* <li>Create</li>
* <li>Replace</li>
* <li>Upsert</li>
* <li>Delete</li>
* <li>Patch</li>
* <li>Batch</li>
* <li>Bulk</li>
* </ul>
*
* @param sessionRetryOptions The {@link SessionRetryOptions} instance.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
    // Applies to all operations executed through clients built from this builder;
    // see the Javadoc above for per-account-type retry semantics.
    this.sessionRetryOptions = sessionRetryOptions;
    return this;
}
/**
* Sets a {@link Supplier<CosmosExcludedRegions>} which returns a {@link CosmosExcludedRegions} instance when {@link Supplier
* The request will not be routed to regions present in {@link CosmosExcludedRegions
* for hedging scenarios and retry scenarios for the workload executed through this instance
* of {@link CosmosClient} / {@link CosmosAsyncClient}.
*
* @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
* @return current CosmosClientBuilder.
* */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
    // The supplier is user code evaluated at request time; it may return
    // different values across invocations.
    this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
    return this;
}
/**
 * Gets the regions to exclude from the list of preferred regions. A request will not be
 * routed to these excluded regions for non-retry and retry scenarios
 * for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @return the set of regions to exclude; empty when no supplier is configured
 *         or the supplier yields {@code null}.
 * */
Set<String> getExcludedRegions() {
    // Invoke the user-provided supplier exactly once: the previous code called
    // get() twice (once for the null check, once for the read), which
    // double-invokes user code and can observe two different values when the
    // supplier is not stable across calls.
    if (this.cosmosExcludedRegionsSupplier != null) {
        CosmosExcludedRegions excludedRegions = this.cosmosExcludedRegionsSupplier.get();
        if (excludedRegions != null) {
            return excludedRegions.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
// Package-private accessor for the configured session retry options (may be null).
SessionRetryOptions getSessionRetryOptions() {
    return sessionRetryOptions;
}
/**
 * Gets the {@link CosmosEndToEndOperationLatencyPolicyConfig}.
 *
 * @return the {@link CosmosEndToEndOperationLatencyPolicyConfig} (may be null)
 */
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
    return cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Gets the GATEWAY connection configuration to be used.
 *
 * @return gateway connection config (may be null)
 */
GatewayConnectionConfig getGatewayConnectionConfig() {
    return this.gatewayConnectionConfig;
}
/**
 * Gets the DIRECT connection configuration to be used.
 *
 * @return direct connection config (may be null)
 */
DirectConnectionConfig getDirectConnectionConfig() {
    return this.directConnectionConfig;
}
/**
 * Gets the value of the user-agent suffix.
 *
 * @return the value of user-agent suffix.
 */
String getUserAgentSuffix() {
    return this.userAgentSuffix;
}
/**
 * Gets the retry policy options associated with the DocumentClient instance.
 *
 * @return the RetryOptions instance.
 */
ThrottlingRetryOptions getThrottlingRetryOptions() {
    return this.throttlingRetryOptions;
}
/**
 * Gets the preferred regions for geo-replicated database accounts.
 *
 * @return the configured preferred regions, or an empty list when none were set.
 */
List<String> getPreferredRegions() {
    if (this.preferredRegions == null) {
        return Collections.emptyList();
    }
    return this.preferredRegions;
}
/**
 * Gets the flag to enable endpoint discovery for geo-replicated database accounts.
 *
 * @return whether endpoint discovery is enabled.
 */
boolean isEndpointDiscoveryEnabled() {
    return this.endpointDiscoveryEnabled;
}
/**
 * Gets the flag to enable writes on any region for geo-replicated database
 * accounts. DEFAULT is true; only effective when EnableMultipleWriteRegions is
 * also enabled on the DatabaseAccount.
 *
 * @return flag to enable writes on any regions for geo-replicated database accounts.
 */
boolean isMultipleWriteRegionsEnabled() {
    return this.multipleWriteRegionsEnabled;
}
/**
 * Gets the effective flag for sending client telemetry to the service.
 * Precedence: the builder-level override set via
 * {@code clientTelemetryEnabled(boolean)}, then the explicit value in the
 * telemetry config, then the SDK default.
 *
 * @return flag to enable client telemetry.
 */
boolean isClientTelemetryEnabled() {
    // Short-circuit on the builder-level override before consulting the
    // telemetry-config accessor; the previous code computed the config value
    // even when the override made it irrelevant.
    if (this.clientTelemetryEnabledOverride != null) {
        return this.clientTelemetryEnabledOverride;
    }
    Boolean explicitlySetInConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
    if (explicitlySetInConfig != null) {
        return explicitlySetInConfig;
    }
    return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
/**
 * Gets whether reads may go to multiple regions configured on the account.
 * DEFAULT is true; when unset, the effective default is true for all
 * consistency levels other than Bounded Staleness (false for Bounded
 * Staleness), provided the account has more than one region.
 *
 * @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
 */
boolean isReadRequestsFallbackEnabled() {
    return this.readRequestsFallbackEnabled;
}
/**
 * Returns the client telemetry config instance for this builder.
 *
 * @return the client telemetry config instance for this builder
 */
CosmosClientTelemetryConfig getClientTelemetryConfig() {
    return clientTelemetryConfig;
}
/**
 * Sets the client telemetry configuration for this builder. When the supplied
 * config explicitly states whether telemetry should be sent to the service,
 * any stale override previously set via {@code clientTelemetryEnabled(boolean)}
 * is discarded so the config value wins.
 *
 * @param telemetryConfig the client telemetry configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
    ifThrowIllegalArgException(telemetryConfig == null,
        "Parameter 'telemetryConfig' must not be null.");
    Boolean explicitValueFromConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(telemetryConfig);
    if (explicitValueFromConfig != null) {
        // The config's explicit value takes precedence over the builder override.
        this.clientTelemetryEnabledOverride = null;
    }
    this.clientTelemetryConfig = telemetryConfig;
    return this;
}
/**
* Sets a custom serializer that should be used for conversion between POJOs and Json payload stored in the
* Cosmos DB service. The custom serializer can also be specified in request options. If defined here and
* in request options the serializer defined in request options will be used.
* @param customItemSerializer the custom serializer to be used for item payload transformations
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
this.defaultCustomSerializer = customItemSerializer;
return this;
}
CosmosItemSerializer getCustomItemSerializer() {
return this.defaultCustomSerializer;
}
/**
 * Builds a Cosmos async client with the properties configured on this builder.
 *
 * @return CosmosAsyncClient
 */
public CosmosAsyncClient buildAsyncClient() {
    // Delegate to the internal overload; startup info is logged by default.
    return this.buildAsyncClient(true);
}
/**
 * Builds a cosmos async client with the provided properties
 *
 * @param logStartupInfo whether to emit the one-time startup/configuration log line
 *                       after the client has been created.
 * @return CosmosAsyncClient
 */
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    // Time the whole build (including any proactive warmup) for the startup log line.
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        // Proactively open connections / warm caches for the configured containers.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            // Time-boxed ("aggressive") warmup.
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // No proactive init configured: still record completion (with an empty list)
        // so the warmup lifecycle is consistently closed out.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
/**
 * Builds a cosmos sync client with the provided properties.
 * <p>
 * Mirrors {@code buildAsyncClient(boolean)}: validates the configuration, builds the
 * connection policy, optionally performs proactive container warmup, and logs the
 * startup information.
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // FIX: the async builder records warmup completion with an empty list when no
        // proactive init is configured; do the same here so both build paths close out
        // the warmup lifecycle consistently.
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
/**
 * Materializes the {@link ConnectionPolicy} from the configured direct/gateway
 * connection configs and the builder-level knobs (preferred/excluded regions,
 * user agent suffix, retry options, endpoint discovery, multi-write, read fallback).
 *
 * @return the connection policy that clients built from this builder will use
 */
ConnectionPolicy buildConnectionPolicy() {
    if (this.directConnectionConfig != null) {
        // Direct mode still needs a gateway config for metadata operations.
        if (this.gatewayConnectionConfig == null) {
            this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        }
        this.connectionPolicy = new ConnectionPolicy(this.directConnectionConfig, this.gatewayConnectionConfig);
    } else if (this.gatewayConnectionConfig != null) {
        this.connectionPolicy = new ConnectionPolicy(this.gatewayConnectionConfig);
    }
    // Otherwise keep the default policy created by the constructor.
    ConnectionPolicy policy = this.connectionPolicy;
    policy.setPreferredRegions(this.preferredRegions);
    policy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
    policy.setUserAgentSuffix(this.userAgentSuffix);
    policy.setThrottlingRetryOptions(this.throttlingRetryOptions);
    policy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
    policy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
    policy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    return policy;
}
/**
 * Validates the builder configuration before a client is created.
 * <p>
 * Checks: a non-null, parseable service endpoint; non-empty, resolvable preferred
 * regions; proactive-init constraints (requires preferred regions, region count
 * bounded by the preferred-region count, endpoint discovery for &gt; 1 region);
 * and that at least one authentication mechanism is configured.
 *
 * @throws IllegalArgumentException when any of the above constraints is violated
 */
private void validateConfig() {
    // FIX: check the endpoint for null BEFORE parsing it. Previously `new URI(null)`
    // threw a NullPointerException, masking the intended IllegalArgumentException
    // below (that null check was unreachable for a null endpoint).
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Each preferred region must be non-blank and must map to a resolvable
        // location endpoint for the configured account endpoint.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // At least one authentication mechanism must be present.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
/**
 * Gets the internal {@code Configs} instance used by this builder.
 *
 * @return the configs
 */
Configs configs() {
    return this.configs;
}

/**
 * Sets the internal {@code Configs} instance (test/internal hook).
 *
 * @param configs the configs to use
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder configs(Configs configs) {
    this.configs = configs;
    return this;
}

/**
 * Throws an {@link IllegalArgumentException} with the given message when the
 * condition holds; otherwise does nothing.
 *
 * @param value condition to evaluate
 * @param error message for the exception when the condition is true
 */
private void ifThrowIllegalArgException(boolean value, String error) {
    if (!value) {
        return;
    }
    throw new IllegalArgumentException(error);
}
/**
 * Emits a one-time log line summarizing the effective client configuration and
 * how long the client took to start up.
 * <p>
 * NOTE(review): this intentionally logs at WARN level — presumably so the startup
 * summary is visible even with conservative log configurations; confirm before
 * changing the level.
 *
 * @param stopwatch the (running) stopwatch started at the beginning of the build
 * @param client the freshly created async client to describe
 */
private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
    stopwatch.stop();
    if (logger.isWarnEnabled()) {
        long time = stopwatch.getTime();
        String diagnosticsCfg = "";
        String tracingCfg = "";
        if (client.getClientTelemetryConfig() != null) {
            diagnosticsCfg = client.getClientTelemetryConfig().toString();
        }
        DiagnosticsProvider provider = client.getDiagnosticsProvider();
        if (provider != null) {
            tracingCfg = provider.getTraceConfigLog();
        }
        // Parameterized SLF4J logging; the format string and argument order must stay in sync.
        logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
            "configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
            "consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
            "connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
            "diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
            client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
            getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
            isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
            isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
            tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
            io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
    }
}
/**
 * Registers the cross-package accessor that lets internal SDK code reach
 * package-private members of {@code CosmosClientBuilder} without widening their
 * visibility. Each override simply delegates to the corresponding builder method.
 */
static void initialize() {
    CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
        new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
            @Override
            public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
                                                              CosmosClientMetadataCachesSnapshot metadataCache) {
                builder.metadataCaches(metadataCache);
            }
            @Override
            public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
                return builder.metadataCaches();
            }
            @Override
            public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
                builder.setApiType(apiType);
            }
            @Override
            public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
                return builder.apiType();
            }
            @Override
            public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
                return builder.getConnectionPolicy();
            }
            @Override
            public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
                return builder.buildConnectionPolicy();
            }
            @Override
            public Configs getConfigs(CosmosClientBuilder builder) {
                return builder.configs();
            }
            @Override
            public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
                return builder.getConsistencyLevel();
            }
            @Override
            public String getEndpoint(CosmosClientBuilder builder) {
                return builder.getEndpoint();
            }
            @Override
            public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
                return builder.getCustomItemSerializer();
            }
            @Override
            public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
                builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
            }
            @Override
            public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
                return builder.isRegionScopedSessionCapturingEnabled();
            }
        });
}
// Ensure the accessor is registered as soon as the class is loaded.
static { initialize(); }
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
// Internal SDK configuration knobs (replaceable for tests via configs(Configs)).
private Configs configs = new Configs();
// --- Account / authentication state: exactly one auth mechanism is kept; each
// --- setter clears the others (see key(), resourceToken(), credential(), permissions()).
private String serviceEndpoint;
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
// --- Connectivity configuration; connectionPolicy is (re)built in buildConnectionPolicy().
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
// --- Behavioral flags surfaced through the fluent setters below.
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
private List<String> preferredRegions;
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
// --- Telemetry: clientTelemetryEnabledOverride (when non-null) wins over the config value.
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
private Boolean clientTelemetryEnabledOverride = null;
// --- Optional features: proactive warmup, e2e latency policy, session retry,
// --- excluded-regions supplier, operation policies, custom serialization.
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
 * Instantiates a new Cosmos client builder with Direct-mode defaults, an empty
 * user-agent suffix, default throttling retry options and telemetry config, and
 * the default (disabled) non-idempotent write retry policy.
 */
public CosmosClientBuilder() {
    // Direct connectivity with default settings is the out-of-the-box policy.
    this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
    this.requestPolicies = new LinkedList<>();
    this.userAgentSuffix = "";
    this.throttlingRetryOptions = new ThrottlingRetryOptions();
    this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
    this.resetNonIdempotentWriteRetryPolicy();
}

/**
 * Attaches a metadata-caches snapshot (internal hook used by the bridge accessor).
 *
 * @param metadataCachesSnapshot the snapshot to attach
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
    this.state = metadataCachesSnapshot;
    return this;
}

/**
 * Gets the attached metadata-caches snapshot, or {@code null} when none was set.
 *
 * @return the snapshot
 */
CosmosClientMetadataCachesSnapshot metadataCaches() {
    return this.state;
}
/**
 * Enables/disables region-scoped session capturing, which reduces the frequency of
 * cross-region retries when Session Consistency guarantees are sought for operations
 * scoped to a single logical partition (reads/writes stick to regions the partition
 * previously saw, cutting latency and CPU spikes from unnecessary retries).
 * <p>
 * Applies to all operations executed through this client instance, provided both the
 * operation and the account support multi-region writes. Supported operations include
 * point operations (read/create/upsert/delete/replace/batch/patch) and query/change feed
 * scoped to a single logical partition; bulk operations are NOT supported.
 * When enabling this, maintain a singleton {@code CosmosClient}/{@code CosmosAsyncClient}.
 *
 * @param isRegionScopedSessionCapturingEnabled whether region-scoped session capturing is on
 * @return current {@link CosmosClientBuilder}
 */
CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
    this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
    return this;
}

/**
 * Gets the region-scoped session capturing flag.
 *
 * @return whether region-scoped session capturing is enabled
 */
boolean isRegionScopedSessionCapturingEnabled() {
    return this.isRegionScopedSessionCapturingEnabled;
}

/**
 * Sets the {@link ApiType} for this builder (internal hook).
 *
 * @param apiType the api type
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder setApiType(ApiType apiType){
    this.apiType = apiType;
    return this;
}
/**
 * Gets an unmodifiable view of the operation policies registered on this builder.
 * <p>
 * NOTE(review): the previous Javadoc described this method as "adding" a policy and
 * documented a {@code policy} parameter, but the method only reads — it was likely
 * copied from the corresponding add-method; corrected here.
 *
 * @return the registered operation policies, as an unmodifiable list
 */
List<CosmosOperationPolicy> getOperationPolicies() {
    return UnmodifiableList.unmodifiableList(this.requestPolicies);
}
/**
 * Returns the {@link ApiType} configured on this builder, or {@code null} if unset.
 *
 * @return the configured api type
 */
ApiType apiType(){ return this.apiType; }
/**
 * Enables session capturing for consistency levels other than Session. Session
 * capturing is on by default for Session consistency; for other levels it is only
 * needed when occasionally sending requests with Session consistency. Setting this
 * while already in Session mode has no effect.
 *
 * @param sessionCapturingOverrideEnabled session capturing override
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
    this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
    return this;
}

/**
 * Indicates whether session capturing is enabled for non-Session consistency modes.
 * Defaults to {@code false}.
 *
 * @return the session capturing override
 */
boolean isSessionCapturingOverrideEnabled() {
    return this.sessionCapturingOverrideEnabled;
}

/**
 * Enables Direct-mode connection sharing across multiple Cosmos clients in the same
 * JVM that talk to multiple Cosmos accounts. Defaults to {@code false}.
 * <p>
 * When enabled, the connection configuration (socket timeout, idle timeout, ...) of
 * the FIRST instantiated client is used for all subsequent client instances.
 *
 * @param connectionSharingAcrossClientsEnabled connection sharing
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
    this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
    return this;
}

/**
 * Indicates whether Direct-mode connection sharing across clients is enabled.
 * Defaults to {@code false}.
 *
 * @return the connection sharing across multiple clients
 */
boolean isConnectionSharingAcrossClientsEnabled() {
    return this.connectionSharingAcrossClientsEnabled;
}
/**
 * Gets the authorization token resolver, or {@code null} when none is configured.
 *
 * @return the token resolver
 */
CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
    return this.cosmosAuthorizationTokenResolver;
}

/**
 * Sets the authorization token resolver. Any previously configured key, resource
 * token, key credential, permissions or token credential is cleared — only one
 * authentication mechanism is kept at a time.
 *
 * @param cosmosAuthorizationTokenResolver the token resolver; must not be null
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder authorizationTokenResolver(
    CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
    this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
        "'cosmosAuthorizationTokenResolver' cannot be null.");
    // Drop every other auth mechanism.
    this.keyOrResourceToken = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/**
 * Gets the Azure Cosmos DB endpoint the SDK will connect to.
 *
 * @return the endpoint
 */
String getEndpoint() {
    return this.serviceEndpoint;
}

/**
 * Sets the Azure Cosmos DB endpoint the SDK will connect to.
 *
 * @param endpoint the service endpoint; must not be null
 * @return current Builder
 */
@Override
public CosmosClientBuilder endpoint(String endpoint) {
    this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    return this;
}

/**
 * Gets the master or readonly key used for authentication.
 *
 * @return the key
 */
String getKey() {
    return this.keyOrResourceToken;
}

/**
 * Sets a master or readonly key used for authentication. Clears every other
 * configured authentication mechanism.
 *
 * @param key master or readonly key; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder key(String key) {
    this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}

/**
 * Gets the resource token used for authentication.
 *
 * @return the resourceToken
 */
String getResourceToken() {
    return this.keyOrResourceToken;
}

/**
 * Sets a resource token used for authentication. Clears every other configured
 * authentication mechanism.
 *
 * @param resourceToken resourceToken for authentication; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder resourceToken(String resourceToken) {
    this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}

/**
 * Gets the token credential used for authentication.
 *
 * @return the token credential.
 */
TokenCredential getTokenCredential() {
    return this.tokenCredential;
}

/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service.
 * See the Azure SDK for Java identity documentation for proper usage of the
 * {@link TokenCredential} type. Clears every other configured authentication mechanism.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated CosmosClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    return this;
}

/**
 * Gets the permission list containing the resource tokens needed to access resources.
 *
 * @return the permission list
 */
List<CosmosPermissionProperties> getPermissions() {
    return this.permissions;
}

/**
 * Sets the permission list containing the resource tokens needed to access resources.
 * Clears every other configured authentication mechanism.
 *
 * @param permissions Permission list for authentication; must not be null
 * @return current Builder.
 */
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
    this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.tokenCredential = null;
    return this;
}
/**
 * Gets the {@link ConsistencyLevel} to be used; {@code null} means the account default.
 *
 * @return the consistency level
 */
ConsistencyLevel getConsistencyLevel() {
    return this.desiredConsistencyLevel;
}

/**
 * Sets the {@link ConsistencyLevel} to be used by clients built from this builder.
 *
 * @param desiredConsistencyLevel {@link ConsistencyLevel}
 * @return current Builder
 */
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
    this.desiredConsistencyLevel = desiredConsistencyLevel;
    return this;
}

/**
 * Gets the {@link ConnectionPolicy} currently held by this builder.
 *
 * @return the connection policy
 */
ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}

/**
 * Gets the {@link AzureKeyCredential} to be used, or {@code null} when unset.
 *
 * @return {@link AzureKeyCredential}
 */
AzureKeyCredential getCredential() {
    return this.credential;
}

/**
 * Gets the {@link CosmosContainerProactiveInitConfig} to be used, or {@code null} when unset.
 *
 * @return {@link CosmosContainerProactiveInitConfig}
 */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
    return this.proactiveContainerInitConfig;
}

/**
 * Sets the {@link AzureKeyCredential} to be used. Clears every other configured
 * authentication mechanism.
 *
 * @param credential {@link AzureKeyCredential}; must not be null
 * @return current cosmosClientBuilder
 */
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
    this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/**
 * Indicates whether the full payload is returned for Create, Update and Delete
 * operations on items. When {@code false} (the default), the service omits the
 * payload from the response, reducing network and CPU load; only headers and
 * status code are returned.
 *
 * @return a boolean indicating whether payload will be included in the response or not
 */
boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}

/**
 * Controls whether the full payload is returned for Create, Update and Delete
 * operations on items. When {@code false} (the default), only headers and status
 * code come back — saving network transfer and client-side serialization. This
 * setting does not affect RU consumption for reads or writes.
 *
 * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
    this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
    return this;
}
/**
 * Selects GATEWAY connectivity with default settings.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode() {
    return this.gatewayMode(GatewayConnectionConfig.getDefaultConfig());
}

/**
 * Selects GATEWAY connectivity with the supplied configuration.
 *
 * @param gatewayConnectionConfig gateway connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}

/**
 * Selects DIRECT connectivity with default settings. The builder starts out in
 * direct mode by default.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode() {
    return this.directMode(DirectConnectionConfig.getDefaultConfig());
}

/**
 * Selects DIRECT connectivity with the supplied configuration. The builder starts
 * out in direct mode by default.
 *
 * @param directConnectionConfig direct connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
    this.directConnectionConfig = directConnectionConfig;
    return this;
}

/**
 * Selects DIRECT connectivity, additionally configuring the gateway client that is
 * still used for some metadata operations. Setting the gateway config here does NOT
 * change the connection mode — it remains Direct.
 *
 * @param directConnectionConfig direct connection configuration to be used
 * @param gatewayConnectionConfig gateway connection configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
    this.directConnectionConfig = directConnectionConfig;
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
 * Sets the value of the user-agent suffix, appended to the user-agent header for
 * monitoring purposes.
 *
 * @param userAgentSuffix The value to be appended to the user-agent header
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
    this.userAgentSuffix = userAgentSuffix;
    return this;
}

/**
 * Sets the throttling retry options used to customize the built-in retry policies.
 * Optional; when unset, SDK defaults apply (see {@code ThrottlingRetryOptions}).
 *
 * @param throttlingRetryOptions the RetryOptions instance.
 * @return current CosmosClientBuilder
 * @throws IllegalArgumentException thrown if an error occurs
 */
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
    this.throttlingRetryOptions = throttlingRetryOptions;
    return this;
}

/**
 * Sets the preferred regions for geo-replicated accounts, e.g. "East US". When
 * endpoint discovery is enabled and this list is non-empty, the SDK prefers the
 * regions in the given order; with endpoint discovery disabled it is ignored.
 *
 * @param preferredRegions the list of preferred regions.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
    this.preferredRegions = preferredRegions;
    return this;
}

/**
 * Enables/disables endpoint discovery for geo-replicated accounts. When enabled
 * (the default), the SDK automatically discovers current write and read regions
 * and routes requests accordingly.
 *
 * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
    this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
    return this;
}

/**
 * Enables/disables writes on any writable region of a geo-replicated account
 * (ordered by preferred regions). Defaults to {@code true}; has no effect unless
 * the account itself enables multiple write regions.
 *
 * @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
 * database accounts.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
    this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
    return this;
}

/**
 * Opt-in flag for client telemetry (periodic operation statistics and system info
 * sent to the Cosmos monitoring service). Default is off. This sets a builder-level
 * override that takes precedence over the telemetry config's own flag.
 *
 * @param clientTelemetryEnabled flag to enable client telemetry.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
    this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
    return this;
}

/**
 * Controls whether reads may go to multiple regions configured on the account.
 * When unset, the effective default is {@code true} for all consistency levels
 * except Bounded Staleness (default {@code false}).
 *
 * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
 * Azure Cosmos DB service.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
    this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
    return this;
}
/**
* Enables automatic retries for write operations even when the SDK can't
* guarantee that they are idempotent. This is the default behavior for the entire Cosmos client - the policy can be
* overridden for individual operations in the request options.
* <br/>>
* NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
* Upsert and Delete operations. It can be overridden on per-request base in the request options. For patch
* operations by default (unless overridden in the request options) retries are always disabled by default.
* <br/>
* - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
* to create a document that the initial attempt successfully created. When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
* <br/>
* - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when a
* replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
* initial attempt successfully updated (causing the etag to change). When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
* <br/>
* - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
* initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
* handle 404 - Not Found.
* <br/>
* - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
* document has been created by the initial attempt - so logically within the same operation. This will only
* impact applications who have special casing for 201 vs. 200 for upsert operations.
* <br/>
* Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
* being executed and the precondition filters being used. Before enabling write retries for patch this needs
* to be carefully reviewed and tests - which is wht retries for patch can only be enabled on request options
* - any CosmosClient wide configuration will be ignored.
* <br/>
* Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
* @param options the options controlling whether non-idempotent write operations should be retried and whether
* trackingIds can be used.
* @return the CosmosItemRequestOptions
*/
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
checkNotNull(options, "Argument 'options' must not be null.");
if (options.isEnabled()) {
if (options.isTrackingIdUsed()) {
this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
} else {
this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
}
} else {
this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
}
return this;
}
    // Internal accessor for the effective non-idempotent write retry policy.
    WriteRetryPolicy getNonIdempotentWriteRetryPolicy()
    {
        return this.writeRetryPolicy;
    }
void resetNonIdempotentWriteRetryPolicy()
{
String writePolicyName = Configs.getNonIdempotentWriteRetryPolicy();
if (writePolicyName != null) {
if (writePolicyName.equalsIgnoreCase("NO_RETRIES")) {
this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
return;
} else if (writePolicyName.equalsIgnoreCase("WITH_TRACKING_ID")) {
this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
return;
} else if (writePolicyName.equalsIgnoreCase("WITH_RETRIES")) {
this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
return;
}
}
this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
}
void resetSessionCapturingType() {
String sessionCapturingType = Configs.getSessionCapturingType();
if (!StringUtils.isEmpty(sessionCapturingType)) {
if (sessionCapturingType.equalsIgnoreCase("REGION_SCOPED")) {
logger.info("Session capturing type is set to REGION_SCOPED");
this.isRegionScopedSessionCapturingEnabled = true;
} else {
logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
this.isRegionScopedSessionCapturingEnabled = false;
}
}
}
    /**
     * Sets the {@link CosmosContainerProactiveInitConfig} which enable warming up of caches and connections
     * associated with containers obtained from {@link CosmosContainerProactiveInitConfig#getCosmosContainerIdentities()},
     * obtained from the first <em>k</em> preferred regions where <em>k</em> evaluates to
     * {@link CosmosContainerProactiveInitConfig#getProactiveConnectionRegionsCount()}.
     * (NOTE(review): link targets reconstructed from truncated javadoc tags - verify against upstream source.)
     *
     * <p>
     * Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate {@link CosmosContainerProactiveInitConfig} class
     * </p>
     * @param proactiveContainerInitConfig which encapsulates a list of container identities and no of
     * proactive connection regions
     * @return current CosmosClientBuilder
     * */
    public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
        // Only records the config here; the actual warm-up is triggered from buildClient()/buildAsyncClient().
        this.proactiveContainerInitConfig = proactiveContainerInitConfig;
        return this;
    }
    /**
     * Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client
     * @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig){
        this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
        return this;
    }
    /**
     * Sets the {@link SessionRetryOptions} instance on the client.
     * <p>
     * This setting helps in optimizing retry behavior associated with
     * {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} or {@code 404 / 1002} scenarios which happen
     * when the targeted consistency used by the request is <i>Session Consistency</i> and a
     * request goes to a region that does not have recent enough data which the
     * request is looking for.
     * <p>
     * DISCLAIMER: Setting {@link SessionRetryOptions} will modify retry behavior
     * for all operations or workloads executed through this instance of the client.
     * <p>
     * For multi-write accounts:
     * <ul>
     *     <li>
     *         For a read request going to a local read region, it is possible to optimize
     *         availability by having the request be retried on a different write region since
     *         the other write region might have more upto date data.
     *     </li>
     *     <li>
     *         For a read request going to a local write region, it could help to
     *         switch to a different write region right away provided the local write region
     *         does not have the most up to date data.
     *     </li>
     *     <li>
     *         For a write request going to a local write region, it could help to
     *         switch to a different write region right away provided the local write region
     *         does not have the most up to date data.
     *     </li>
     * </ul>
     * For single-write accounts:
     * <ul>
     *     <li>
     *         If a read request goes to a local read region, it helps to switch to the write region quicker.
     *     </li>
     *     <li>
     *         If a read request goes to a write region, the {@link SessionRetryOptions} setting does not
     *         matter since the write region in a single-write account has the most up to date data.
     *     </li>
     *     <li>
     *         For a write to a write region in a single-write account, {@code READ_SESSION_NOT_AVAILABLE} errors
     *         do not apply since the write-region always has the most recent version of the data
     *         and all writes go to the primary replica in this region. Therefore, replication lags causing errors
     *         is not applicable here.
     *     </li>
     * </ul>
     * About region switch hints
     * (NOTE(review): hint constants reconstructed from truncated javadoc tags - verify against upstream source):
     * <ul>
     *     <li>In order to prioritize the local region for retries, use the hint {@link CosmosRegionSwitchHint#LOCAL_REGION_PREFERRED}</li>
     *     <li>In order to move retries to a different / remote region quicker, use the hint {@link CosmosRegionSwitchHint#REMOTE_REGION_PREFERRED}</li>
     * </ul>
     * Operations supported:
     * <ul>
     *     <li>Read</li>
     *     <li>Query</li>
     *     <li>Create</li>
     *     <li>Replace</li>
     *     <li>Upsert</li>
     *     <li>Delete</li>
     *     <li>Patch</li>
     *     <li>Batch</li>
     *     <li>Bulk</li>
     * </ul>
     *
     * @param sessionRetryOptions The {@link SessionRetryOptions} instance.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
        this.sessionRetryOptions = sessionRetryOptions;
        return this;
    }
    /**
     * Sets a {@link Supplier} of {@link CosmosExcludedRegions} which returns a {@link CosmosExcludedRegions}
     * instance when invoked.
     * The request will not be routed to the regions it returns,
     * for hedging scenarios and retry scenarios for the workload executed through this instance
     * of {@link CosmosClient} / {@link CosmosAsyncClient}.
     *
     * @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
     * @return current CosmosClientBuilder.
     * */
    public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
        this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
        return this;
    }
/**
* Gets the regions to exclude from the list of preferred regions. A request will not be
* routed to these excluded regions for non-retry and retry scenarios
* for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
*
* @return the list of regions to exclude.
* */
Set<String> getExcludedRegions() {
if (this.cosmosExcludedRegionsSupplier != null && this.cosmosExcludedRegionsSupplier.get() != null) {
return this.cosmosExcludedRegionsSupplier.get().getExcludedRegions();
}
return new HashSet<>();
}
    // Internal accessor for the configured session retry options (may be null).
    SessionRetryOptions getSessionRetryOptions() {
        return this.sessionRetryOptions;
    }
    /**
     * Gets the {@link CosmosEndToEndOperationLatencyPolicyConfig}
     * @return the {@link CosmosEndToEndOperationLatencyPolicyConfig}
     */
    CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
        return this.cosmosEndToEndOperationLatencyPolicyConfig;
    }
    /**
     * Gets the GATEWAY connection configuration to be used.
     *
     * @return gateway connection config
     */
    GatewayConnectionConfig getGatewayConnectionConfig() {
        return gatewayConnectionConfig;
    }
    /**
     * Gets the DIRECT connection configuration to be used.
     *
     * @return direct connection config
     */
    DirectConnectionConfig getDirectConnectionConfig() {
        return directConnectionConfig;
    }
    /**
     * Gets the value of user-agent suffix.
     *
     * @return the value of user-agent suffix.
     */
    String getUserAgentSuffix() {
        return userAgentSuffix;
    }
    /**
     * Gets the retry policy options associated with the DocumentClient instance.
     *
     * @return the RetryOptions instance.
     */
    ThrottlingRetryOptions getThrottlingRetryOptions() {
        return throttlingRetryOptions;
    }
    /**
     * Gets the preferred regions for geo-replicated database accounts
     *
     * @return the list of preferred region; never null (empty list when not configured).
     */
    List<String> getPreferredRegions() {
        return preferredRegions != null ? preferredRegions : Collections.emptyList();
    }
    /**
     * Gets the flag to enable endpoint discovery for geo-replicated database accounts.
     *
     * @return whether endpoint discovery is enabled.
     */
    boolean isEndpointDiscoveryEnabled() {
        return endpointDiscoveryEnabled;
    }
    /**
     * Gets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
     * Cosmos DB service.
     * <p>
     * When the value of this property is true, the SDK will direct write operations to
     * available writable regions of geo-replicated database account. Writable regions
     * are ordered by PreferredRegions property. Setting the property value
     * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
     * is also set to true.
     * <p>
     * DEFAULT value is true indicating that writes are directed to
     * available writable regions of geo-replicated database account.
     *
     * @return flag to enable writes on any regions for geo-replicated database accounts.
     */
    boolean isMultipleWriteRegionsEnabled() {
        return multipleWriteRegionsEnabled;
    }
/**
* Gets the flag to enabled client telemetry.
*
* @return flag to enable client telemetry.
*/
boolean isClientTelemetryEnabled() {
Boolean explicitlySetInConfig = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
if (this.clientTelemetryEnabledOverride != null) {
return this.clientTelemetryEnabledOverride;
}
if (explicitlySetInConfig != null) {
return explicitlySetInConfig;
}
return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
    /**
     * Gets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
     * <p>
     * DEFAULT value is true.
     * <p>
     * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
     * The default is false for Bounded Staleness.
     * This property only takes effect when both of the following hold
     * (NOTE(review): list reconstructed from a truncated javadoc tag - confirm against upstream source):
     * 1. {@link #endpointDiscoveryEnabled(boolean)} is true
     * 2. the Azure Cosmos DB account has more than one region
     *
     * @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
     */
    boolean isReadRequestsFallbackEnabled() {
        return readRequestsFallbackEnabled;
    }
    /**
     * Returns the client telemetry config instance for this builder
     * @return the client telemetry config instance for this builder
     */
    CosmosClientTelemetryConfig getClientTelemetryConfig() {
        return this.clientTelemetryConfig;
    }
/**
* Returns the client telemetry config instance for this builder
* @param telemetryConfig the client telemetry configuration to be used
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
ifThrowIllegalArgException(telemetryConfig == null,
"Parameter 'telemetryConfig' must not be null.");
Boolean explicitValueFromConfig = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(telemetryConfig);
if (explicitValueFromConfig != null) {
this.clientTelemetryEnabledOverride = null;
}
this.clientTelemetryConfig = telemetryConfig;
return this;
}
    /**
     * Sets a custom serializer that should be used for conversion between POJOs and Json payload stored in the
     * Cosmos DB service. The custom serializer can also be specified in request options. If defined here and
     * in request options the serializer defined in request options will be used.
     * @param customItemSerializer the custom serializer to be used for item payload transformations
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
        this.defaultCustomSerializer = customItemSerializer;
        return this;
    }
    // Internal accessor for the client-wide custom item serializer (may be null).
    CosmosItemSerializer getCustomItemSerializer() {
        return this.defaultCustomSerializer;
    }
    /**
     * Builds a cosmos async client with the provided properties
     *
     * @return CosmosAsyncClient
     */
    public CosmosAsyncClient buildAsyncClient() {
        // Delegates to the internal overload with startup-info logging enabled.
        return buildAsyncClient(true);
    }
    /**
     * Builds a cosmos async client with the provided properties
     *
     * @param logStartupInfo whether to emit the startup-configuration log line after the client is built
     * @return CosmosAsyncClient
     */
    CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
        StopWatch stopwatch = new StopWatch();
        stopwatch.start();
        // Order matters: config must be normalized and validated before the client is constructed.
        this.resetSessionCapturingType();
        validateConfig();
        buildConnectionPolicy();
        CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
        if (proactiveContainerInitConfig != null) {
            // Proactively warm up caches/connections for the configured containers,
            // optionally bounded by an aggressive warm-up duration.
            cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
            Duration aggressiveWarmupDuration = proactiveContainerInitConfig
                .getAggressiveWarmupDuration();
            if (aggressiveWarmupDuration != null) {
                cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
            } else {
                cosmosAsyncClient.openConnectionsAndInitCaches();
            }
            cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        } else {
            // No proactive init configured: record completion with an empty identity list.
            cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
        }
        if (logStartupInfo) {
            logStartupInfo(stopwatch, cosmosAsyncClient);
        }
        return cosmosAsyncClient;
    }
    /**
     * Builds a cosmos sync client with the provided properties
     *
     * @return CosmosClient
     */
    public CosmosClient buildClient() {
        StopWatch stopwatch = new StopWatch();
        stopwatch.start();
        // Order matters: config must be normalized and validated before the client is constructed.
        this.resetSessionCapturingType();
        validateConfig();
        buildConnectionPolicy();
        CosmosClient cosmosClient = new CosmosClient(this);
        if (proactiveContainerInitConfig != null) {
            cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
            Duration aggressiveWarmupDuration = proactiveContainerInitConfig
                .getAggressiveWarmupDuration();
            if (aggressiveWarmupDuration != null) {
                cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
            } else {
                cosmosClient.openConnectionsAndInitCaches();
            }
            cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        }
        // NOTE(review): unlike buildAsyncClient(boolean), there is no else-branch calling
        // recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>()) when no proactive
        // init is configured - confirm whether this asymmetry is intentional.
        logStartupInfo(stopwatch, cosmosClient.asyncClient());
        return cosmosClient;
    }
    // Materializes the effective ConnectionPolicy from the configured direct/gateway configs
    // and copies the builder-level settings onto it. When a direct config is present a default
    // gateway config is filled in as well; when neither was set the policy created by the
    // constructor is reused.
    ConnectionPolicy buildConnectionPolicy() {
        if (this.directConnectionConfig != null) {
            //  Check if the user passed additional gateway connection configuration
            if (this.gatewayConnectionConfig == null) {
                this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
            }
            this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
        } else if (gatewayConnectionConfig != null) {
            this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
        }
        this.connectionPolicy.setPreferredRegions(this.preferredRegions);
        this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
        this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
        this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
        this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
        this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
        this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
        return this.connectionPolicy;
    }
private void validateConfig() {
URI uri;
try {
uri = new URI(serviceEndpoint);
} catch (URISyntaxException e) {
throw new IllegalArgumentException("invalid serviceEndpoint", e);
}
if (preferredRegions != null) {
preferredRegions.forEach(
preferredRegion -> {
Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
}
);
}
if (proactiveContainerInitConfig != null) {
Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
"cannot be greater than the no.of preferred regions");
if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
"of proactive regions is greater than 1");
}
}
ifThrowIllegalArgException(this.serviceEndpoint == null,
"cannot buildAsyncClient client without service endpoint");
ifThrowIllegalArgException(
this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
&& this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
"cannot buildAsyncClient client without any one of key, resource token, permissions, and "
+ "azure key credential");
ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
"cannot buildAsyncClient client without key credential");
}
    // Internal accessor for the Configs instance backing this builder.
    Configs configs() {
        return configs;
    }
    /**
     * Configs
     *
     * @param configs the Configs instance to use (internal/test hook)
     * @return current cosmosClientBuilder
     */
    CosmosClientBuilder configs(Configs configs) {
        this.configs = configs;
        return this;
    }
    // Throws IllegalArgumentException with the given message when the condition is true.
    private void ifThrowIllegalArgException(boolean value, String error) {
        if (value) {
            throw new IllegalArgumentException(error);
        }
    }
    // Stops the stopwatch and emits a single WARN-level log line summarizing the effective
    // client configuration (endpoint, regions, policies, telemetry, tracing, transport).
    // WARN is used so the line survives typical production log-level filtering.
    private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
        stopwatch.stop();
        if (logger.isWarnEnabled()) {
            long time = stopwatch.getTime();
            String diagnosticsCfg = "";
            String tracingCfg = "";
            if (client.getClientTelemetryConfig() != null) {
                diagnosticsCfg = client.getClientTelemetryConfig().toString();
            }
            DiagnosticsProvider provider = client.getDiagnosticsProvider();
            if (provider != null) {
                tracingCfg = provider.getTraceConfigLog();
            }
            logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
                    "configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
                    "consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
                    "connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
                    "diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
                client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
                getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
                isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
                isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
                tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
                io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
        }
    }
    // Registers the cross-package accessor that lets internal SDK code reach this builder's
    // package-private members without widening their visibility. Invoked once from the static
    // initializer below.
    static void initialize() {
        CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
            new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
                @Override
                public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
                                                                  CosmosClientMetadataCachesSnapshot metadataCache) {
                    builder.metadataCaches(metadataCache);
                }
                @Override
                public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
                    return builder.metadataCaches();
                }
                @Override
                public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
                    builder.setApiType(apiType);
                }
                @Override
                public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
                    return builder.apiType();
                }
                @Override
                public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
                    return builder.getConnectionPolicy();
                }
                @Override
                public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
                    return builder.buildConnectionPolicy();
                }
                @Override
                public Configs getConfigs(CosmosClientBuilder builder) {
                    return builder.configs();
                }
                @Override
                public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
                    return builder.getConsistencyLevel();
                }
                @Override
                public String getEndpoint(CosmosClientBuilder builder) {
                    return builder.getEndpoint();
                }
                @Override
                public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
                    return builder.getCustomItemSerializer();
                }
                @Override
                public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
                    builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
                }
                @Override
                public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
                    return builder.isRegionScopedSessionCapturingEnabled();
                }
            });
    }
    // Ensure the accessor is registered as soon as the class is loaded.
    static { initialize(); }
} |
changed | public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
this.requestPolicies.add(policy);
return this;
} | this.requestPolicies.add(policy); | public CosmosClientBuilder addOperationPolicy(CosmosOperationPolicy policy) {
checkNotNull(policy, "Argument 'policy' must not be null.");
this.requestPolicies.add(policy);
return this;
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
    private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
    private Configs configs = new Configs();
    // --- endpoint & credentials (mutually exclusive auth mechanisms; setters null out the others) ---
    private String serviceEndpoint;
    private String keyOrResourceToken;
    private CosmosClientMetadataCachesSnapshot state;
    private TokenCredential tokenCredential;
    // --- connectivity configuration ---
    private ConnectionPolicy connectionPolicy;
    private GatewayConnectionConfig gatewayConnectionConfig;
    private DirectConnectionConfig directConnectionConfig;
    private ConsistencyLevel desiredConsistencyLevel;
    private List<CosmosPermissionProperties> permissions;
    private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
    private AzureKeyCredential credential;
    // --- behavior flags (defaults documented on their setters) ---
    private boolean sessionCapturingOverrideEnabled;
    private boolean connectionSharingAcrossClientsEnabled;
    private boolean contentResponseOnWriteEnabled;
    private String userAgentSuffix;
    private ThrottlingRetryOptions throttlingRetryOptions;
    private List<String> preferredRegions;
    private boolean endpointDiscoveryEnabled = true;
    private boolean multipleWriteRegionsEnabled = true;
    private boolean readRequestsFallbackEnabled = true;
    private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
    // --- telemetry: clientTelemetryEnabledOverride (builder-level) wins over clientTelemetryConfig ---
    private CosmosClientTelemetryConfig clientTelemetryConfig;
    private ApiType apiType = null;
    private Boolean clientTelemetryEnabledOverride = null;
    private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
    private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
    private SessionRetryOptions sessionRetryOptions;
    private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
    private final List<CosmosOperationPolicy> requestPolicies;
    private CosmosItemSerializer defaultCustomSerializer;
    private boolean isRegionScopedSessionCapturingEnabled = false;
/**
* Instantiates a new Cosmos client builder.
*/
public CosmosClientBuilder() {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.userAgentSuffix = "";
this.throttlingRetryOptions = new ThrottlingRetryOptions();
this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
this.resetNonIdempotentWriteRetryPolicy();
this.requestPolicies = new LinkedList<>();
}
    // Internal: stores a previously captured metadata-cache snapshot for warm starts.
    CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
        this.state = metadataCachesSnapshot;
        return this;
    }
    // Internal accessor for the stored metadata-cache snapshot (may be null).
    CosmosClientMetadataCachesSnapshot metadataCaches() {
        return this.state;
    }
    /**
     * Sets a {@code boolean} flag to reduce the frequency of retries when the client
     * strives to meet Session Consistency guarantees for operations
     * that can be scoped to a single logical partition. Read your writes for a given logical partition
     * should see higher stickiness to regions where the logical partition was written to prior or saw requests in
     * thus reducing unnecessary cross-region retries. Reduction of retries would reduce CPU utilization spikes on VMs
     * where the client is deployed along with latency savings through reduction of cross-region calls.
     *
     * <p>
     * DISCLAIMER: Setting {@link CosmosClientBuilder#regionScopedSessionCapturingEnabled(boolean)}
     * (NOTE(review): link target reconstructed from a truncated javadoc tag)
     * will impact all operations executed through this instance of the client provided that
     * both the operation and the account support multi-region writes.
     * </p>
     * <p>
     * When setting this flag,
     * ensure to maintain a singleton instance of {@link CosmosClient} or {@link CosmosAsyncClient}.
     * </p>
     *
     * Operations supported:
     * <ul>
     *     <li>Read</li>
     *     <li>Create</li>
     *     <li>Upsert</li>
     *     <li>Delete</li>
     *     <li>Replace</li>
     *     <li>Batch</li>
     *     <li>Patch</li>
     *     <li>Query when scoped to a single logical partition by specifying {@code PartitionKey} with {@link com.azure.cosmos.models.CosmosQueryRequestOptions}</li>
     *     <li>Change feed when scoped to a single logical partition by using {@code FeedRange.forLogicalPartition()} with {@link com.azure.cosmos.models.CosmosChangeFeedRequestOptions}</li>
     * </ul>
     *
     * <p>
     * NOTE: Bulk operations are not supported.
     * </p>
     *
     * @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
     * @return current {@link CosmosClientBuilder}
     * */
    CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
        this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
        return this;
    }
    /**
     * Gets the {@code boolean} flag set via {@link CosmosClientBuilder#regionScopedSessionCapturingEnabled(boolean)}.
     *
     * @return isRegionScopedSessionCapturingEnabled A {@code boolean} flag
     * */
    boolean isRegionScopedSessionCapturingEnabled() {
        return this.isRegionScopedSessionCapturingEnabled;
    }
    /**
     * Sets an apiType for the builder.
     * @param apiType the API surface (e.g. SQL/Table) this client is created for; internal use
     * @return current cosmosClientBuilder
     */
    CosmosClientBuilder setApiType(ApiType apiType){
        this.apiType = apiType;
        return this;
    }
    /**
     * Returns a read-only view of the operation policies registered on this builder.
     * <p>
     * (NOTE(review): the previous javadoc here described *adding* a policy and documented a
     * {@code policy} parameter - it belonged to the corresponding add-method, not this accessor.)
     *
     * @return an unmodifiable list of the configured {@link CosmosOperationPolicy} instances
     */
    List<CosmosOperationPolicy> getOperationPolicies() {
        return UnmodifiableList.unmodifiableList(this.requestPolicies);
    }
    /**
     * Returns apiType for the Builder.
     * @return the {@link ApiType} configured on this builder, or {@code null} when none was set
     */
    ApiType apiType(){ return this.apiType; }
    /**
     * Session capturing is enabled by default for {@link ConsistencyLevel#SESSION}
     * (NOTE(review): link target reconstructed from a truncated javadoc tag).
     * For other consistency levels, it is not needed, unless if you need occasionally send requests with Session
     * Consistency while the client is not configured in session.
     * <p>
     * enabling Session capturing for Session mode has no effect.
     * @param sessionCapturingOverrideEnabled session capturing override
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        return this;
    }
    /**
     * Indicates if Session capturing is enabled for non Session modes.
     * The default is false.
     *
     * @return the session capturing override
     */
    boolean isSessionCapturingOverrideEnabled() {
        return this.sessionCapturingOverrideEnabled;
    }
    /**
     * Enables connections sharing across multiple Cosmos Clients. The default is false.
     * <br/>
     * <br/>
     * <pre>
     * {@code
     * CosmosAsyncClient client1 = new CosmosClientBuilder()
     *         .endpoint(serviceEndpoint1)
     *         .key(key1)
     *         .consistencyLevel(ConsistencyLevel.SESSION)
     *         .connectionSharingAcrossClientsEnabled(true)
     *         .buildAsyncClient();
     *
     * CosmosAsyncClient client2 = new CosmosClientBuilder()
     *         .endpoint(serviceEndpoint2)
     *         .key(key2)
     *         .consistencyLevel(ConsistencyLevel.SESSION)
     *         .connectionSharingAcrossClientsEnabled(true)
     *         .buildAsyncClient();
     *
     *
     * }
     * </pre>
     * <br/>
     * When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
     * enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
     * <br/>
     * Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout
     * config) of the first instantiated client will be used for all other client instances.
     * <br/>
     * @param connectionSharingAcrossClientsEnabled connection sharing
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        return this;
    }
    /**
     * Indicates whether connection sharing is enabled. The default is false.
     * <br/>
     * When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
     * enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
     * <br/>
     * @return the connection sharing across multiple clients
     */
    boolean isConnectionSharingAcrossClientsEnabled() {
        return this.connectionSharingAcrossClientsEnabled;
    }
    /**
     * Gets the token resolver
     * <br/>
     * @return the token resolver, or {@code null} when none is configured
     */
    CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
        return cosmosAuthorizationTokenResolver;
    }
/**
* Sets the token resolver
*
* @param cosmosAuthorizationTokenResolver the token resolver
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder authorizationTokenResolver(
    CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
    this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
        "'cosmosAuthorizationTokenResolver' cannot be null.");
    // Authentication mechanisms are mutually exclusive: clear every other configured credential kind.
    this.keyOrResourceToken = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/**
* Gets the Azure Cosmos DB endpoint the SDK will connect to
*
* @return the endpoint
*/
String getEndpoint() {
    // Raw endpoint string as supplied via endpoint(String); parsed and validated at build time.
    return serviceEndpoint;
}
/**
* Sets the Azure Cosmos DB endpoint the SDK will connect to
*
* @param endpoint the service endpoint
* @return current Builder
*/
@Override
public CosmosClientBuilder endpoint(String endpoint) {
    // Endpoint syntax is not validated here; validateConfig() parses it during build.
    this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    return this;
}
/**
* Gets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @return the key
*/
String getKey() {
    // Shared field with getResourceToken(); whichever setter ran last owns the value.
    return keyOrResourceToken;
}
/**
* Sets either a master or readonly key used to perform authentication
* for accessing resource.
*
* @param key master or readonly key
* @return current Builder.
*/
public CosmosClientBuilder key(String key) {
    this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
    // Authentication mechanisms are mutually exclusive: clear every other configured credential kind.
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/**
* Gets a resource token used to perform authentication
* for accessing resource.
*
* @return the resourceToken
*/
String getResourceToken() {
    // Shared field with getKey(); whichever setter ran last owns the value.
    return keyOrResourceToken;
}
/**
* Sets a resource token used to perform authentication
* for accessing resource.
*
* @param resourceToken resourceToken for authentication
* @return current Builder.
*/
public CosmosClientBuilder resourceToken(String resourceToken) {
    this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
    // Authentication mechanisms are mutually exclusive: clear every other configured credential kind.
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/**
* Gets a token credential instance used to perform authentication
* for accessing resource.
*
* @return the token credential.
*/
TokenCredential getTokenCredential() {
    // May be null when another authentication mechanism is configured.
    return tokenCredential;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * <a href="https://aka.ms/azsdk/java/identity/examples">identity and authentication</a>
 * documentation for more details on proper usage of the {@link TokenCredential} type.
*
* @param credential {@link TokenCredential} used to authorize requests sent to the service.
* @return the updated CosmosClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Authentication mechanisms are mutually exclusive: clear every other configured credential kind.
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.permissions = null;
    return this;
}
/**
* Gets the permission list, which contains the
* resource tokens needed to access resources.
*
* @return the permission list
*/
List<CosmosPermissionProperties> getPermissions() {
    // May be null when another authentication mechanism is configured.
    return permissions;
}
/**
* Sets the permission list, which contains the
* resource tokens needed to access resources.
*
* @param permissions Permission list for authentication.
* @return current Builder.
*/
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
    this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
    // Authentication mechanisms are mutually exclusive: clear every other configured credential kind.
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.credential = null;
    this.tokenCredential = null;
    return this;
}
/**
* Gets the {@link ConsistencyLevel} to be used
* <br/>
 * By default, {@link ConsistencyLevel#SESSION SESSION} consistency is used.
* <br/>
* @return the consistency level
*/
ConsistencyLevel getConsistencyLevel() {
    // Null when the caller did not request a specific level; the account default then applies.
    return this.desiredConsistencyLevel;
}
/**
* Sets the {@link ConsistencyLevel} to be used
* <br/>
 * By default, {@link ConsistencyLevel#SESSION SESSION} consistency is used.
*
* @param desiredConsistencyLevel {@link ConsistencyLevel}
* @return current Builder
*/
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
    // Intentionally accepts null (meaning: use the account's default consistency).
    this.desiredConsistencyLevel = desiredConsistencyLevel;
    return this;
}
/**
 * Gets the {@link ConnectionPolicy} to be used
*
* @return the connection policy
*/
ConnectionPolicy getConnectionPolicy() {
    // Populated by buildConnectionPolicy() during client construction.
    return connectionPolicy;
}
/**
* Gets the {@link AzureKeyCredential} to be used
*
* @return {@link AzureKeyCredential}
*/
AzureKeyCredential getCredential() {
    // May be null when another authentication mechanism is configured.
    return credential;
}
/**
* Gets the {@link CosmosContainerProactiveInitConfig} to be used
*
* @return {@link CosmosContainerProactiveInitConfig}
* */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
    // Null unless openConnectionsAndInitCaches(...) was called on this builder.
    return proactiveContainerInitConfig;
}
/**
* Sets the {@link AzureKeyCredential} to be used
*
* @param credential {@link AzureKeyCredential}
* @return current cosmosClientBuilder
*/
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
    this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
    // Authentication mechanisms are mutually exclusive: clear every other configured credential kind.
    this.keyOrResourceToken = null;
    this.cosmosAuthorizationTokenResolver = null;
    this.permissions = null;
    this.tokenCredential = null;
    return this;
}
/**
* Gets the boolean which indicates whether to only return the headers and status code in Cosmos DB response
* in case of Create, Update and Delete operations on CosmosItem.
* <br/>
* If set to false (which is by default), service doesn't return payload in the response. It reduces networking
* and CPU load by not sending the payload back over the network and serializing it
* on the client.
* <br/>
* By-default, this is false.
*
* @return a boolean indicating whether payload will be included in the response or not
*/
boolean isContentResponseOnWriteEnabled() {
    // False by default: write responses then omit the item payload to save network/CPU.
    return contentResponseOnWriteEnabled;
}
/**
* Sets the boolean to only return the headers and status code in Cosmos DB response
* in case of Create, Update and Delete operations on CosmosItem.
* <br/>
* If set to false (which is by default), service doesn't return payload in the response. It reduces networking
* and CPU load by not sending the payload back over the network and serializing it on the client.
* <br/>
* This feature does not impact RU usage for read or write operations.
* <br/>
* By-default, this is false.
*
* @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
* @return current cosmosClientBuilder
*/
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
    // Fluent setter; see isContentResponseOnWriteEnabled() for semantics.
    this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
    return this;
}
/**
* Sets the default GATEWAY connection configuration to be used.
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder gatewayMode() {
    // Convenience overload: gateway connectivity with all-default settings.
    this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
    return this;
}
/**
* Sets the GATEWAY connection configuration to be used.
*
* @param gatewayConnectionConfig gateway connection configuration
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
    // Stores the caller-provided gateway configuration; applied in buildConnectionPolicy().
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
* Sets the default DIRECT connection configuration to be used.
* <br/>
* By default, the builder is initialized with directMode()
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode() {
    // Convenience overload: direct (TCP) connectivity with all-default settings.
    this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
    return this;
}
/**
* Sets the DIRECT connection configuration to be used.
* <br/>
* By default, the builder is initialized with directMode()
*
* @param directConnectionConfig direct connection configuration
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
    // Stores the caller-provided direct configuration; applied in buildConnectionPolicy().
    this.directConnectionConfig = directConnectionConfig;
    return this;
}
/**
* Sets the DIRECT connection configuration to be used.
* gatewayConnectionConfig - represents basic configuration to be used for gateway client.
* <br/>
* Even in direct connection mode, some of the meta data operations go through gateway client,
* <br/>
* Setting gateway connection config in this API doesn't affect the connection mode,
* which will be Direct in this case.
*
* @param directConnectionConfig direct connection configuration to be used
* @param gatewayConnectionConfig gateway connection configuration to be used
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
    // Connection mode remains Direct; the gateway config only tunes the metadata
    // operations that always flow through the gateway.
    this.directConnectionConfig = directConnectionConfig;
    this.gatewayConnectionConfig = gatewayConnectionConfig;
    return this;
}
/**
* sets the value of the user-agent suffix.
*
* @param userAgentSuffix The value to be appended to the user-agent header, this is
* used for monitoring purposes.
*
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
    // Appended to the user-agent header; propagated via buildConnectionPolicy().
    this.userAgentSuffix = userAgentSuffix;
    return this;
}
/**
* Sets the retry policy options associated with the DocumentClient instance.
* <p>
* Properties in the RetryOptions class allow application to customize the built-in
* retry policies. This property is optional. When it's not set, the SDK uses the
* default values for configuring the retry policies. See RetryOptions class for
* more details.
*
* @param throttlingRetryOptions the RetryOptions instance.
* @return current CosmosClientBuilder
* @throws IllegalArgumentException thrown if an error occurs
*/
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
    // Overrides the SDK's default throttling (429) retry behavior.
    this.throttlingRetryOptions = throttlingRetryOptions;
    return this;
}
/**
* Sets the preferred regions for geo-replicated database accounts. For example,
* "East US" as the preferred region.
* <p>
* When EnableEndpointDiscovery is true and PreferredRegions is non-empty,
* the SDK will prefer to use the regions in the container in the order
* they are specified to perform operations.
* <p>
* If EnableEndpointDiscovery is set to false, this property is ignored.
*
* @param preferredRegions the list of preferred regions.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
    // Region names are validated (non-blank, resolvable) in validateConfig() at build time.
    this.preferredRegions = preferredRegions;
    return this;
}
/**
* Sets the flag to enable endpoint discovery for geo-replicated database accounts.
* <p>
* When EnableEndpointDiscovery is true, the SDK will automatically discover the
* current write and read regions to ensure requests are sent to the correct region
* based on the capability of the region and the user's preference.
* <p>
* The default value for this property is true indicating endpoint discovery is enabled.
*
* @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
    // Defaults to true; required (checked at build time) when proactive init targets >1 region.
    this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
    return this;
}
/**
* Sets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
* Cosmos DB service.
* <p>
* When the value of this property is true, the SDK will direct write operations to
* available writable regions of geo-replicated database account. Writable regions
* are ordered by PreferredRegions property. Setting the property value
* to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
* is also set to true.
* <p>
* DEFAULT value is true indicating that writes are directed to
* available writable regions of geo-replicated database account.
*
* @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
* database accounts.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
    // Only effective when the account itself has multiple write regions enabled.
    this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
    return this;
}
/**
* Sets the flag to enable client telemetry which will periodically collect
* database operations aggregation statistics, system information like cpu/memory
* and send it to cosmos monitoring service, which will be helpful during debugging.
*<p>
* DEFAULT value is false indicating this is opt in feature, by default no telemetry collection.
*
* @param clientTelemetryEnabled flag to enable client telemetry.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
    // Stored as an override: it takes precedence over the value carried by
    // CosmosClientTelemetryConfig (see isClientTelemetryEnabled()).
    this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
    return this;
}
/**
* Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
* <p>
* DEFAULT value is true.
* <p>
* If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
* The default is false for Bounded Staleness.
 * This property only takes effect when:
 * 1. {@link #endpointDiscoveryEnabled(boolean)} is enabled, and
 * 2. the Azure Cosmos DB account has more than one region
*
* @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
* Azure Cosmos DB service.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
    // Propagated to the ConnectionPolicy in buildConnectionPolicy().
    this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
    return this;
}
/**
* Enables automatic retries for write operations even when the SDK can't
* guarantee that they are idempotent. This is the default behavior for the entire Cosmos client - the policy can be
* overridden for individual operations in the request options.
 * <br/>
* NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
* Upsert and Delete operations. It can be overridden on per-request base in the request options. For patch
* operations by default (unless overridden in the request options) retries are always disabled by default.
* <br/>
* - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
* to create a document that the initial attempt successfully created. When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
* <br/>
* - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when a
* replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
* initial attempt successfully updated (causing the etag to change). When enabling
* useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
* <br/>
* - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
* initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
* handle 404 - Not Found.
* <br/>
* - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
* document has been created by the initial attempt - so logically within the same operation. This will only
* impact applications who have special casing for 201 vs. 200 for upsert operations.
* <br/>
* Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
* being executed and the precondition filters being used. Before enabling write retries for patch this needs
 * to be carefully reviewed and tested - which is why retries for patch can only be enabled on request options
* - any CosmosClient wide configuration will be ignored.
* <br/>
* Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
* @param options the options controlling whether non-idempotent write operations should be retried and whether
* trackingIds can be used.
* @return the CosmosItemRequestOptions
*/
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
    checkNotNull(options, "Argument 'options' must not be null.");
    // Map the options object onto the internal tri-state policy:
    // disabled -> DISABLED, enabled without trackingId -> WITH_RETRIES,
    // enabled with trackingId -> WITH_TRACKING_ID.
    if (!options.isEnabled()) {
        this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
    } else if (options.isTrackingIdUsed()) {
        this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
    } else {
        this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
    }
    return this;
}
WriteRetryPolicy getNonIdempotentWriteRetryPolicy()
{
    // Effective policy for Create/Replace/Upsert/Delete; see nonIdempotentWriteRetryOptions(...).
    return this.writeRetryPolicy;
}
void resetNonIdempotentWriteRetryPolicy()
{
    // Re-resolve the write retry policy from configuration. Any missing or
    // unrecognized value (including the explicit "NO_RETRIES") maps to DISABLED.
    WriteRetryPolicy resolved = WriteRetryPolicy.DISABLED;
    String writePolicyName = Configs.getNonIdempotentWriteRetryPolicy();
    if ("WITH_TRACKING_ID".equalsIgnoreCase(writePolicyName)) {
        resolved = WriteRetryPolicy.WITH_TRACKING_ID;
    } else if ("WITH_RETRIES".equalsIgnoreCase(writePolicyName)) {
        resolved = WriteRetryPolicy.WITH_RETRIES;
    }
    this.writeRetryPolicy = resolved;
}
void resetSessionCapturingType() {
    // Re-reads the session-capturing mode from configuration at build time.
    // NOTE(review): when the config value is empty/absent the flag is intentionally
    // left at its current value rather than reset — confirm this is the desired default.
    String sessionCapturingType = Configs.getSessionCapturingType();
    if (!StringUtils.isEmpty(sessionCapturingType)) {
        if (sessionCapturingType.equalsIgnoreCase("REGION_SCOPED")) {
            logger.info("Session capturing type is set to REGION_SCOPED");
            this.isRegionScopedSessionCapturingEnabled = true;
        } else {
            // Unknown values are logged and treated as "not region scoped".
            logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
            this.isRegionScopedSessionCapturingEnabled = false;
        }
    }
}
/**
* Sets the {@link CosmosContainerProactiveInitConfig} which enable warming up of caches and connections
 * associated with containers obtained from {@link CosmosContainerProactiveInitConfig#getCosmosContainerIdentities()}, with connections
 * obtained from the first <em>k</em> preferred regions where <em>k</em> evaluates to {@link CosmosContainerProactiveInitConfig#getProactiveConnectionRegionsCount()}.
*
* <p>
* Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate {@link CosmosContainerProactiveInitConfig} class
* </p>
* @param proactiveContainerInitConfig which encapsulates a list of container identities and no of
* proactive connection regions
* @return current CosmosClientBuilder
* */
public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    // Stored only; the actual warm-up happens in buildClient()/buildAsyncClient(),
    // and the config is validated in validateConfig().
    this.proactiveContainerInitConfig = proactiveContainerInitConfig;
    return this;
}
/**
* Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client
* @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig){
    // Client-wide default; can typically be overridden per request.
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    return this;
}
/**
* Sets the {@link SessionRetryOptions} instance on the client.
* <p>
* This setting helps in optimizing retry behavior associated with
* {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} or {@code 404 / 1002} scenarios which happen
* when the targeted consistency used by the request is <i>Session Consistency</i> and a
* request goes to a region that does not have recent enough data which the
* request is looking for.
* <p>
* DISCLAIMER: Setting {@link SessionRetryOptions} will modify retry behavior
* for all operations or workloads executed through this instance of the client.
* <p>
* For multi-write accounts:
* <ul>
* <li>
* For a read request going to a local read region, it is possible to optimize
* availability by having the request be retried on a different write region since
* the other write region might have more upto date data.
* </li>
* <li>
* For a read request going to a local write region, it could help to
* switch to a different write region right away provided the local write region
* does not have the most up to date data.
* </li>
* <li>
* For a write request going to a local write region, it could help to
* switch to a different write region right away provided the local write region
* does not have the most up to date data.
* </li>
* </ul>
* For single-write accounts:
* <ul>
* <li>
* If a read request goes to a local read region, it helps to switch to the write region quicker.
* </li>
* <li>
* If a read request goes to a write region, the {@link SessionRetryOptions} setting does not
* matter since the write region in a single-write account has the most up to date data.
* </li>
* <li>
* For a write to a write region in a single-write account, {@code READ_SESSION_NOT_AVAILABLE} errors
* do not apply since the write-region always has the most recent version of the data
* and all writes go to the primary replica in this region. Therefore, replication lags causing errors
* is not applicable here.
* </li>
* </ul>
* About region switch hints:
* <ul>
 * <li>In order to prioritize the local region for retries, use the hint {@link CosmosRegionSwitchHint#LOCAL_REGION_PREFERRED}</li>
 * <li>In order to move retries to a different / remote region quicker, use the hint {@link CosmosRegionSwitchHint#REMOTE_REGION_PREFERRED}</li>
* </ul>
* Operations supported:
* <ul>
* <li>Read</li>
* <li>Query</li>
* <li>Create</li>
* <li>Replace</li>
* <li>Upsert</li>
* <li>Delete</li>
* <li>Patch</li>
* <li>Batch</li>
* <li>Bulk</li>
* </ul>
*
* @param sessionRetryOptions The {@link SessionRetryOptions} instance.
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
    // Client-wide setting affecting retries on 404/1002 (READ_SESSION_NOT_AVAILABLE).
    this.sessionRetryOptions = sessionRetryOptions;
    return this;
}
/**
 * Sets a {@link Supplier} which returns a {@link CosmosExcludedRegions} instance when {@link Supplier#get()} is invoked.
 * The request will not be routed to regions present in {@link CosmosExcludedRegions#getExcludedRegions()} both
* for hedging scenarios and retry scenarios for the workload executed through this instance
* of {@link CosmosClient} / {@link CosmosAsyncClient}.
*
* @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
* @return current CosmosClientBuilder.
* */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
    // The supplier is invoked per request (see getExcludedRegions()), so the
    // application can change the excluded set dynamically.
    this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
    return this;
}
/**
* Gets the regions to exclude from the list of preferred regions. A request will not be
* routed to these excluded regions for non-retry and retry scenarios
* for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
*
* @return the list of regions to exclude.
* */
/**
 * Resolves the current set of regions to exclude from request routing.
 *
 * @return the excluded region names, or an empty (mutable) set when no supplier is
 * configured or the supplier yields {@code null}.
 */
Set<String> getExcludedRegions() {
    // Invoke the supplier exactly once per call. The previous implementation called
    // get() twice (null check + use); a supplier whose value changes between calls
    // could return null on the second invocation and throw a NullPointerException.
    if (this.cosmosExcludedRegionsSupplier != null) {
        CosmosExcludedRegions excludedRegionsSnapshot = this.cosmosExcludedRegionsSupplier.get();
        if (excludedRegionsSnapshot != null) {
            return excludedRegionsSnapshot.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
SessionRetryOptions getSessionRetryOptions() {
    // May be null when the application did not configure session retry behavior.
    return this.sessionRetryOptions;
}
/**
* Gets the {@link CosmosEndToEndOperationLatencyPolicyConfig}
* @return the {@link CosmosEndToEndOperationLatencyPolicyConfig}
*/
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
    // May be null when no client-wide end-to-end latency policy was configured.
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
* Gets the GATEWAY connection configuration to be used.
*
* @return gateway connection config
*/
GatewayConnectionConfig getGatewayConnectionConfig() {
    // May be null until buildConnectionPolicy() fills in a default for direct mode.
    return gatewayConnectionConfig;
}
/**
* Gets the DIRECT connection configuration to be used.
*
* @return direct connection config
*/
DirectConnectionConfig getDirectConnectionConfig() {
    // Null when the builder was switched to gateway-only mode.
    return directConnectionConfig;
}
/**
* Gets the value of user-agent suffix.
*
* @return the value of user-agent suffix.
*/
String getUserAgentSuffix() {
    // May be null when no suffix was configured.
    return userAgentSuffix;
}
/**
* Gets the retry policy options associated with the DocumentClient instance.
*
* @return the RetryOptions instance.
*/
ThrottlingRetryOptions getThrottlingRetryOptions() {
    // Propagated into the ConnectionPolicy at build time.
    return throttlingRetryOptions;
}
/**
* Gets the preferred regions for geo-replicated database accounts
*
* @return the list of preferred region.
*/
List<String> getPreferredRegions() {
    // Never returns null: callers get an empty list when no regions were configured.
    return preferredRegions != null ? preferredRegions : Collections.emptyList();
}
/**
* Gets the flag to enable endpoint discovery for geo-replicated database accounts.
*
* @return whether endpoint discovery is enabled.
*/
boolean isEndpointDiscoveryEnabled() {
    // Defaults to true; see endpointDiscoveryEnabled(boolean).
    return endpointDiscoveryEnabled;
}
/**
* Gets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
* Cosmos DB service.
* <p>
* When the value of this property is true, the SDK will direct write operations to
* available writable regions of geo-replicated database account. Writable regions
* are ordered by PreferredRegions property. Setting the property value
* to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
* is also set to true.
* <p>
* DEFAULT value is true indicating that writes are directed to
* available writable regions of geo-replicated database account.
*
* @return flag to enable writes on any regions for geo-replicated database accounts.
*/
boolean isMultipleWriteRegionsEnabled() {
    // Client-side flag only; effective iff the account has multiple write regions.
    return multipleWriteRegionsEnabled;
}
/**
* Gets the flag to enabled client telemetry.
*
* @return flag to enable client telemetry.
*/
boolean isClientTelemetryEnabled() {
    // Precedence: builder-level override > value carried by the telemetry config > SDK default.
    if (this.clientTelemetryEnabledOverride != null) {
        return this.clientTelemetryEnabledOverride;
    }
    Boolean fromTelemetryConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
    if (fromTelemetryConfig != null) {
        return fromTelemetryConfig;
    }
    return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
/**
* Gets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
* <p>
* DEFAULT value is true.
* <p>
* If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
* The default is false for Bounded Staleness.
 * This property only takes effect when:
 * 1. {@link #endpointDiscoveryEnabled(boolean)} is enabled, and
 * 2. the Azure Cosmos DB account has more than one region
*
* @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
*/
boolean isReadRequestsFallbackEnabled() {
    // Propagated into the ConnectionPolicy at build time.
    return readRequestsFallbackEnabled;
}
/**
* Returns the client telemetry config instance for this builder
* @return the client telemetry config instance for this builder
*/
CosmosClientTelemetryConfig getClientTelemetryConfig() {
    // The override flag (clientTelemetryEnabledOverride) may supersede this config's
    // send-to-service setting; see isClientTelemetryEnabled().
    return this.clientTelemetryConfig;
}
/**
* Returns the client telemetry config instance for this builder
* @param telemetryConfig the client telemetry configuration to be used
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
    ifThrowIllegalArgException(telemetryConfig == null,
        "Parameter 'telemetryConfig' must not be null.");
    Boolean explicitValueFromConfig = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(telemetryConfig);
    // When the new config carries an explicit send-to-service value, drop the
    // builder-level override so the config's value wins (see isClientTelemetryEnabled()).
    if (explicitValueFromConfig != null) {
        this.clientTelemetryEnabledOverride = null;
    }
    this.clientTelemetryConfig = telemetryConfig;
    return this;
}
/**
* Sets a custom serializer that should be used for conversion between POJOs and Json payload stored in the
* Cosmos DB service. The custom serializer can also be specified in request options. If defined here and
* in request options the serializer defined in request options will be used.
* @param customItemSerializer the custom serializer to be used for item payload transformations
* @return current CosmosClientBuilder
*/
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
    // Client-wide default; a serializer set in request options takes precedence.
    this.defaultCustomSerializer = customItemSerializer;
    return this;
}
CosmosItemSerializer getCustomItemSerializer() {
    // May be null when no custom serializer was configured.
    return this.defaultCustomSerializer;
}
/**
* Builds a cosmos async client with the provided properties
*
* @return CosmosAsyncClient
*/
public CosmosAsyncClient buildAsyncClient() {
    // Public entry point: always logs startup info.
    return buildAsyncClient(true);
}
/**
* Builds a cosmos async client with the provided properties
*
* @return CosmosAsyncClient
*/
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    // Build sequence: resolve session capturing from config, validate builder state,
    // materialize the ConnectionPolicy, then construct the client. Order matters:
    // validateConfig() must run before the client is created.
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        // Proactive warm-up: open connections / init caches for the configured
        // containers, optionally bounded by an aggressive warm-up duration.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // No proactive init configured: still record completion with an empty list.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
/**
* Builds a cosmos sync client with the provided properties
*
* @return CosmosClient
*/
/**
 * Builds a cosmos sync client with the provided properties.
 * <p>
 * Mirrors {@code buildAsyncClient(boolean)}: validates the builder state, materializes
 * the connection policy, constructs the client, and performs proactive connection
 * warm-up when configured.
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // Consistency fix: buildAsyncClient(boolean) records completion with an empty
        // list when no proactive init config is present; the sync path previously
        // skipped this call, leaving the completion marker unset.
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
ConnectionPolicy buildConnectionPolicy() {
    // Direct mode wins when both configs are set; a default gateway config is still
    // created because some metadata operations always flow through the gateway.
    if (this.directConnectionConfig != null) {
        if (this.gatewayConnectionConfig == null) {
            this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        }
        this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
    } else if (gatewayConnectionConfig != null) {
        this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
    }
    // NOTE(review): if both configs were somehow null, this.connectionPolicy would be
    // dereferenced unconditionally below — presumably the builder defaults to direct
    // mode so that cannot happen; confirm the field is never null at this point.
    this.connectionPolicy.setPreferredRegions(this.preferredRegions);
    this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
    this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
    this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
    this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
    this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
    this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    return this.connectionPolicy;
}
/**
 * Validates the builder configuration before a client is constructed.
 * <p>
 * Checks that a service endpoint is present and parseable, that preferred regions are
 * well-formed region names, that proactive-init settings are consistent with the
 * preferred regions, and that exactly one authentication mechanism is configured.
 *
 * @throws IllegalArgumentException when any configured value is missing or invalid
 */
private void validateConfig() {
    // Fail fast with a descriptive IllegalArgumentException when no endpoint was set.
    // Previously this check ran after the URI parse, so a null endpoint surfaced as an
    // unhelpful NullPointerException from new URI(null).
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Validate each preferred region name by deriving its regional endpoint;
        // LocationHelper throws for names that cannot form a valid endpoint.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            // Warming up more than one region requires endpoint discovery to resolve them.
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // Exactly one auth mechanism must be present: key/resource token, permissions,
    // AzureKeyCredential, TokenCredential, or a token resolver.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
// Gets the internal Configs instance used by this builder.
Configs configs() {
return configs;
}
/**
 * Sets the internal {@link Configs} instance used by the client (test/internal hook).
 *
 * @param configs the configs instance to use
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder configs(Configs configs) {
this.configs = configs;
return this;
}
/**
 * Throws an {@link IllegalArgumentException} with the given message when the
 * condition is {@code true}; otherwise does nothing.
 *
 * @param value condition that, when true, indicates an invalid configuration
 * @param error message for the thrown exception
 */
private void ifThrowIllegalArgException(boolean value, String error) {
    if (!value) {
        return;
    }
    throw new IllegalArgumentException(error);
}
// Stops the startup timer and emits a one-line summary of the effective client
// configuration. NOTE(review): logged at WARN level, presumably so the snapshot is
// visible under default log configurations — confirm this is intentional.
private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
stopwatch.stop();
if (logger.isWarnEnabled()) {
long time = stopwatch.getTime();
String diagnosticsCfg = "";
String tracingCfg = "";
// Telemetry and tracing configuration may be absent; fall back to empty strings.
if (client.getClientTelemetryConfig() != null) {
diagnosticsCfg = client.getClientTelemetryConfig().toString();
}
DiagnosticsProvider provider = client.getDiagnosticsProvider();
if (provider != null) {
tracingCfg = provider.getTraceConfigLog();
}
logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
"configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
"consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
"connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
"diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
}
}
// Registers the accessor bridge so SDK-internal packages can reach package-private
// members of CosmosClientBuilder without widening its public API surface.
static void initialize() {
CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
@Override
public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
CosmosClientMetadataCachesSnapshot metadataCache) {
builder.metadataCaches(metadataCache);
}
@Override
public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
return builder.metadataCaches();
}
@Override
public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
builder.setApiType(apiType);
}
@Override
public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
return builder.apiType();
}
@Override
public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
return builder.getConnectionPolicy();
}
@Override
public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
return builder.buildConnectionPolicy();
}
@Override
public Configs getConfigs(CosmosClientBuilder builder) {
return builder.configs();
}
@Override
public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
return builder.getConsistencyLevel();
}
@Override
public String getEndpoint(CosmosClientBuilder builder) {
return builder.getEndpoint();
}
@Override
public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
return builder.getCustomItemSerializer();
}
@Override
public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
}
@Override
public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
return builder.isRegionScopedSessionCapturingEnabled();
}
});
}
// Ensure the accessor bridge is registered as soon as this class is loaded.
static { initialize(); }
} | class CosmosClientBuilder implements
TokenCredentialTrait<CosmosClientBuilder>,
AzureKeyCredentialTrait<CosmosClientBuilder>,
EndpointTrait<CosmosClientBuilder> {
private final static Logger logger = LoggerFactory.getLogger(CosmosClientBuilder.class);
private Configs configs = new Configs();
// Connection/endpoint settings.
private String serviceEndpoint;
// Authentication — exactly one of these mechanisms is expected to be set; each setter
// clears the alternatives.
private String keyOrResourceToken;
private CosmosClientMetadataCachesSnapshot state;
private TokenCredential tokenCredential;
private ConnectionPolicy connectionPolicy;
private GatewayConnectionConfig gatewayConnectionConfig;
private DirectConnectionConfig directConnectionConfig;
private ConsistencyLevel desiredConsistencyLevel;
private List<CosmosPermissionProperties> permissions;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private AzureKeyCredential credential;
// Behavioral flags; defaults documented on the corresponding setters.
private boolean sessionCapturingOverrideEnabled;
private boolean connectionSharingAcrossClientsEnabled;
private boolean contentResponseOnWriteEnabled;
private String userAgentSuffix;
private ThrottlingRetryOptions throttlingRetryOptions;
private List<String> preferredRegions;
private boolean endpointDiscoveryEnabled = true;
private boolean multipleWriteRegionsEnabled = true;
private boolean readRequestsFallbackEnabled = true;
private WriteRetryPolicy writeRetryPolicy = WriteRetryPolicy.DISABLED;
private CosmosClientTelemetryConfig clientTelemetryConfig;
private ApiType apiType = null;
// null means "no explicit override" — the telemetry config's own default applies.
private Boolean clientTelemetryEnabledOverride = null;
private CosmosContainerProactiveInitConfig proactiveContainerInitConfig;
private CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private SessionRetryOptions sessionRetryOptions;
private Supplier<CosmosExcludedRegions> cosmosExcludedRegionsSupplier;
private final List<CosmosOperationPolicy> requestPolicies;
private CosmosItemSerializer defaultCustomSerializer;
private boolean isRegionScopedSessionCapturingEnabled = false;
/**
 * Instantiates a new Cosmos client builder.
 */
public CosmosClientBuilder() {
// Direct connectivity is the default mode.
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
this.userAgentSuffix = "";
this.throttlingRetryOptions = new ThrottlingRetryOptions();
this.clientTelemetryConfig = new CosmosClientTelemetryConfig();
// Honors a JVM-wide configuration override for the write retry policy.
this.resetNonIdempotentWriteRetryPolicy();
this.requestPolicies = new LinkedList<>();
}
// Stores a previously captured metadata-cache snapshot used to warm up a new client.
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
this.state = metadataCachesSnapshot;
return this;
}
// Returns the metadata-cache snapshot configured on this builder, or null when unset.
CosmosClientMetadataCachesSnapshot metadataCaches() {
return this.state;
}
/**
 * Sets a {@code boolean} flag to reduce the frequency of retries when the client
 * strives to meet Session Consistency guarantees for operations
 * that can be scoped to a single logical partition. Read your writes for a given logical partition
 * should see higher stickiness to regions where the logical partition was written to prior or saw requests in
 * thus reducing unnecessary cross-region retries. Reduction of retries would reduce CPU utilization spikes on VMs
 * where the client is deployed along with latency savings through reduction of cross-region calls.
 *
 * <p>
 * DISCLAIMER: Setting the {@link CosmosClientBuilder#regionScopedSessionCapturingEnabled(boolean)} flag
 * will impact all operations executed through this instance of the client provided that
 * both the operation and the account support multi-region writes.
 * </p>
 * <p>
 * When setting {@link CosmosClientBuilder#regionScopedSessionCapturingEnabled(boolean)} to {@code true},
 * ensure to maintain a singleton instance of {@link CosmosClient} or {@link CosmosAsyncClient}.
 * </p>
 *
 * Operations supported:
 * <ul>
 *     <li>Read</li>
 *     <li>Create</li>
 *     <li>Upsert</li>
 *     <li>Delete</li>
 *     <li>Replace</li>
 *     <li>Batch</li>
 *     <li>Patch</li>
 *     <li>Query when scoped to a single logical partition by specifying {@code PartitionKey} with {@link com.azure.cosmos.models.CosmosQueryRequestOptions}</li>
 *     <li>Change feed when scoped to a single logical partition by using {@code FeedRange.forLogicalPartition()} with {@link com.azure.cosmos.models.CosmosChangeFeedRequestOptions}</li>
 * </ul>
 *
 * <p>
 * NOTE: Bulk operations are not supported.
 * </p>
 *
 * @param isRegionScopedSessionCapturingEnabled A {@code boolean} flag
 * @return current {@link CosmosClientBuilder}
 * */
CosmosClientBuilder regionScopedSessionCapturingEnabled(boolean isRegionScopedSessionCapturingEnabled) {
this.isRegionScopedSessionCapturingEnabled = isRegionScopedSessionCapturingEnabled;
return this;
}
/**
 * Gets the {@code boolean} flag set via {@link CosmosClientBuilder#regionScopedSessionCapturingEnabled(boolean)}.
 *
 * @return isRegionScopedSessionCapturingEnabled A {@code boolean} flag
 * */
boolean isRegionScopedSessionCapturingEnabled() {
return this.isRegionScopedSessionCapturingEnabled;
}
/**
 * Sets an apiType for the builder.
 * @param apiType the api type hint recorded for this client
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder setApiType(ApiType apiType){
this.apiType = apiType;
return this;
}
/**
 * Gets the operation policies registered on this builder, which can modify request
 * options dynamically. The last policy defined aimed towards the same operation type
 * will be the one ultimately applied.
 *
 * @return an unmodifiable view of the registered operation policies
 */
List<CosmosOperationPolicy> getOperationPolicies() {
return UnmodifiableList.unmodifiableList(this.requestPolicies);
}
/**
 * Returns apiType for the Builder.
 * @return the api type, or {@code null} when none was set
 */
ApiType apiType(){ return this.apiType; }
/**
 * Session capturing is enabled by default for {@link ConsistencyLevel#SESSION}.
 * For other consistency levels, it is not needed, unless if you need occasionally send requests with Session
 * Consistency while the client is not configured in session.
 * <p>
 * enabling Session capturing for Session mode has no effect.
 * @param sessionCapturingOverrideEnabled session capturing override
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
return this;
}
/**
 * Indicates if Session capturing is enabled for non Session modes.
 * The default is false.
 *
 * @return the session capturing override
 */
boolean isSessionCapturingOverrideEnabled() {
return this.sessionCapturingOverrideEnabled;
}
/**
 * Enables connections sharing across multiple Cosmos Clients. The default is false.
 * <br/>
 * <br/>
 * <pre>
 * {@code
 * CosmosAsyncClient client1 = new CosmosClientBuilder()
 *         .endpoint(serviceEndpoint1)
 *         .key(key1)
 *         .consistencyLevel(ConsistencyLevel.SESSION)
 *         .connectionSharingAcrossClientsEnabled(true)
 *         .buildAsyncClient();
 *
 * CosmosAsyncClient client2 = new CosmosClientBuilder()
 *         .endpoint(serviceEndpoint2)
 *         .key(key2)
 *         .consistencyLevel(ConsistencyLevel.SESSION)
 *         .connectionSharingAcrossClientsEnabled(true)
 *         .buildAsyncClient();
 *
 *
 * }
 * </pre>
 * <br/>
 * When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
 * enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
 * <br/>
 * Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout
 * config) of the first instantiated client will be used for all other client instances.
 * <br/>
 * @param connectionSharingAcrossClientsEnabled connection sharing
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
return this;
}
/**
 * Indicates whether connection sharing is enabled. The default is false.
 * <br/>
 * When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts,
 * enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client.
 * <br/>
 * @return the connection sharing across multiple clients
 */
boolean isConnectionSharingAcrossClientsEnabled() {
return this.connectionSharingAcrossClientsEnabled;
}
/**
 * Gets the token resolver
 * <br/>
 * @return the token resolver, or {@code null} when none was configured
 */
CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
/**
 * Sets the token resolver. Configuring a token resolver makes it the sole
 * authentication mechanism: any previously configured key, resource token,
 * permissions or credential is cleared.
 *
 * @param cosmosAuthorizationTokenResolver the token resolver
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder authorizationTokenResolver(
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
"'cosmosAuthorizationTokenResolver' cannot be null.");
// Auth mechanisms are mutually exclusive; clear the alternatives.
this.keyOrResourceToken = null;
this.credential = null;
this.permissions = null;
this.tokenCredential = null;
return this;
}
/**
 * Gets the Azure Cosmos DB endpoint the SDK will connect to
 *
 * @return the endpoint
 */
String getEndpoint() {
return serviceEndpoint;
}
/**
 * Sets the Azure Cosmos DB endpoint the SDK will connect to
 *
 * @param endpoint the service endpoint
 * @return current Builder
 */
@Override
public CosmosClientBuilder endpoint(String endpoint) {
this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
 * Gets either a master or readonly key used to perform authentication
 * for accessing resource.
 *
 * @return the key
 */
String getKey() {
return keyOrResourceToken;
}
/**
 * Sets either a master or readonly key used to perform authentication
 * for accessing resource. Any previously configured alternative authentication
 * mechanism (token resolver, credential, permissions) is cleared.
 *
 * @param key master or readonly key
 * @return current Builder.
 */
public CosmosClientBuilder key(String key) {
this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
// Auth mechanisms are mutually exclusive; clear the alternatives.
this.cosmosAuthorizationTokenResolver = null;
this.credential = null;
this.permissions = null;
this.tokenCredential = null;
return this;
}
/**
 * Gets a resource token used to perform authentication
 * for accessing resource.
 *
 * @return the resourceToken
 */
String getResourceToken() {
return keyOrResourceToken;
}
/**
 * Sets a resource token used to perform authentication
 * for accessing resource. Any previously configured alternative authentication
 * mechanism is cleared.
 *
 * @param resourceToken resourceToken for authentication
 * @return current Builder.
 */
public CosmosClientBuilder resourceToken(String resourceToken) {
this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
// Auth mechanisms are mutually exclusive; clear the alternatives.
this.cosmosAuthorizationTokenResolver = null;
this.credential = null;
this.permissions = null;
this.tokenCredential = null;
return this;
}
/**
 * Gets a token credential instance used to perform authentication
 * for accessing resource.
 *
 * @return the token credential.
 */
TokenCredential getTokenCredential() {
return tokenCredential;
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * <a href="https://aka.ms/azsdk/java/identity">identity and authentication</a>
 * documentation for more details on proper usage of the {@link TokenCredential} type.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated CosmosClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public CosmosClientBuilder credential(TokenCredential credential) {
this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
// Auth mechanisms are mutually exclusive; clear the alternatives.
this.keyOrResourceToken = null;
this.cosmosAuthorizationTokenResolver = null;
this.credential = null;
this.permissions = null;
return this;
}
/**
 * Gets the permission list, which contains the
 * resource tokens needed to access resources.
 *
 * @return the permission list
 */
List<CosmosPermissionProperties> getPermissions() {
return permissions;
}
/**
 * Sets the permission list, which contains the
 * resource tokens needed to access resources. Any previously configured alternative
 * authentication mechanism is cleared.
 *
 * @param permissions Permission list for authentication.
 * @return current Builder.
 */
public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
// Auth mechanisms are mutually exclusive; clear the alternatives.
this.keyOrResourceToken = null;
this.cosmosAuthorizationTokenResolver = null;
this.credential = null;
this.tokenCredential = null;
return this;
}
/**
 * Gets the {@link ConsistencyLevel} to be used
 * <br/>
 * By default, {@link ConsistencyLevel#SESSION} consistency is used.
 * <br/>
 * @return the consistency level
 */
ConsistencyLevel getConsistencyLevel() {
return this.desiredConsistencyLevel;
}
/**
 * Sets the {@link ConsistencyLevel} to be used
 * <br/>
 * By default, {@link ConsistencyLevel#SESSION} consistency is used.
 *
 * @param desiredConsistencyLevel {@link ConsistencyLevel}
 * @return current Builder
 */
public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
return this;
}
/**
 * Gets the (@link ConnectionPolicy) to be used
 *
 * @return the connection policy
 */
ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
/**
 * Gets the {@link AzureKeyCredential} to be used
 *
 * @return {@link AzureKeyCredential}
 */
AzureKeyCredential getCredential() {
return credential;
}
/**
 * Gets the {@link CosmosContainerProactiveInitConfig} to be used
 *
 * @return {@link CosmosContainerProactiveInitConfig}
 * */
CosmosContainerProactiveInitConfig getProactiveContainerInitConfig() {
return proactiveContainerInitConfig;
}
/**
 * Sets the {@link AzureKeyCredential} to be used. Any previously configured
 * alternative authentication mechanism is cleared.
 *
 * @param credential {@link AzureKeyCredential}
 * @return current cosmosClientBuilder
 */
@Override
public CosmosClientBuilder credential(AzureKeyCredential credential) {
this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
// Auth mechanisms are mutually exclusive; clear the alternatives.
this.keyOrResourceToken = null;
this.cosmosAuthorizationTokenResolver = null;
this.permissions = null;
this.tokenCredential = null;
return this;
}
/**
 * Gets the boolean which indicates whether to only return the headers and status code in Cosmos DB response
 * in case of Create, Update and Delete operations on CosmosItem.
 * <br/>
 * If set to false (which is by default), service doesn't return payload in the response. It reduces networking
 * and CPU load by not sending the payload back over the network and serializing it
 * on the client.
 * <br/>
 * By-default, this is false.
 *
 * @return a boolean indicating whether payload will be included in the response or not
 */
boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
/**
 * Sets the boolean to only return the headers and status code in Cosmos DB response
 * in case of Create, Update and Delete operations on CosmosItem.
 * <br/>
 * If set to false (which is by default), service doesn't return payload in the response. It reduces networking
 * and CPU load by not sending the payload back over the network and serializing it on the client.
 * <br/>
 * This feature does not impact RU usage for read or write operations.
 * <br/>
 * By-default, this is false.
 *
 * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not
 * @return current cosmosClientBuilder
 */
public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
return this;
}
/**
 * Sets the default GATEWAY connection configuration to be used.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode() {
this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
return this;
}
/**
 * Sets the GATEWAY connection configuration to be used.
 *
 * @param gatewayConnectionConfig gateway connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
this.gatewayConnectionConfig = gatewayConnectionConfig;
return this;
}
/**
 * Sets the default DIRECT connection configuration to be used.
 * <br/>
 * By default, the builder is initialized with directMode()
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode() {
this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
return this;
}
/**
 * Sets the DIRECT connection configuration to be used.
 * <br/>
 * By default, the builder is initialized with directMode()
 *
 * @param directConnectionConfig direct connection configuration
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
this.directConnectionConfig = directConnectionConfig;
return this;
}
/**
 * Sets the DIRECT connection configuration to be used.
 * gatewayConnectionConfig - represents basic configuration to be used for gateway client.
 * <br/>
 * Even in direct connection mode, some of the meta data operations go through gateway client,
 * <br/>
 * Setting gateway connection config in this API doesn't affect the connection mode,
 * which will be Direct in this case.
 *
 * @param directConnectionConfig direct connection configuration to be used
 * @param gatewayConnectionConfig gateway connection configuration to be used
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) {
this.directConnectionConfig = directConnectionConfig;
this.gatewayConnectionConfig = gatewayConnectionConfig;
return this;
}
/**
 * sets the value of the user-agent suffix.
 *
 * @param userAgentSuffix The value to be appended to the user-agent header, this is
 * used for monitoring purposes.
 *
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
this.userAgentSuffix = userAgentSuffix;
return this;
}
/**
 * Sets the retry policy options associated with the DocumentClient instance.
 * <p>
 * Properties in the RetryOptions class allow application to customize the built-in
 * retry policies. This property is optional. When it's not set, the SDK uses the
 * default values for configuring the retry policies. See RetryOptions class for
 * more details.
 *
 * @param throttlingRetryOptions the RetryOptions instance.
 * @return current CosmosClientBuilder
 * @throws IllegalArgumentException thrown if an error occurs
 */
public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
this.throttlingRetryOptions = throttlingRetryOptions;
return this;
}
/**
 * Sets the preferred regions for geo-replicated database accounts. For example,
 * "East US" as the preferred region.
 * <p>
 * When EnableEndpointDiscovery is true and PreferredRegions is non-empty,
 * the SDK will prefer to use the regions in the container in the order
 * they are specified to perform operations.
 * <p>
 * If EnableEndpointDiscovery is set to false, this property is ignored.
 *
 * @param preferredRegions the list of preferred regions.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
this.preferredRegions = preferredRegions;
return this;
}
/**
 * Sets the flag to enable endpoint discovery for geo-replicated database accounts.
 * <p>
 * When EnableEndpointDiscovery is true, the SDK will automatically discover the
 * current write and read regions to ensure requests are sent to the correct region
 * based on the capability of the region and the user's preference.
 * <p>
 * The default value for this property is true indicating endpoint discovery is enabled.
 *
 * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
return this;
}
/**
 * Sets the flag to enable writes on any regions for geo-replicated database accounts in the Azure
 * Cosmos DB service.
 * <p>
 * When the value of this property is true, the SDK will direct write operations to
 * available writable regions of geo-replicated database account. Writable regions
 * are ordered by PreferredRegions property. Setting the property value
 * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
 * is also set to true.
 * <p>
 * DEFAULT value is true indicating that writes are directed to
 * available writable regions of geo-replicated database account.
 *
 * @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
 * database accounts.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
return this;
}
/**
 * Sets the flag to enable client telemetry which will periodically collect
 * database operations aggregation statistics, system information like cpu/memory
 * and send it to cosmos monitoring service, which will be helpful during debugging.
 *<p>
 * DEFAULT value is false indicating this is opt in feature, by default no telemetry collection.
 *
 * @param clientTelemetryEnabled flag to enable client telemetry.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) {
// Stored as a Boolean override so "never explicitly set" (null) can be distinguished
// from an explicit false.
this.clientTelemetryEnabledOverride = clientTelemetryEnabled;
return this;
}
/**
 * Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service.
 * <p>
 * DEFAULT value is true.
 * <p>
 * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness,
 * The default is false for Bounded Staleness.
 * 1. {@link CosmosClientBuilder#endpointDiscoveryEnabled(boolean)} is {@code true}
 * 2. the Azure Cosmos DB account has more than one region
 *
 * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of
 * Azure Cosmos DB service.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
return this;
}
/**
 * Enables automatic retries for write operations even when the SDK can't
 * guarantee that they are idempotent. This is the default behavior for the entire Cosmos client - the policy can be
 * overridden for individual operations in the request options.
 * <br/>
 * NOTE: the setting on the CosmosClientBuilder will determine the default behavior for Create, Replace,
 * Upsert and Delete operations. It can be overridden on per-request base in the request options. For patch
 * operations by default (unless overridden in the request options) retries are always disabled by default.
 * <br/>
 * - Create: retries can result in surfacing (more) 409-Conflict requests to the application when a retry tries
 * to create a document that the initial attempt successfully created. When enabling
 * useTrackingIdPropertyForCreateAndReplace this can be avoided for 409-Conflict caused by retries.
 * <br/>
 * - Replace: retries can result in surfacing (more) 412-Precondition failure requests to the application when a
 * replace operations are using a pre-condition check (etag) and a retry tries to update a document that the
 * initial attempt successfully updated (causing the etag to change). When enabling
 * useTrackingIdPropertyForCreateAndReplace this can be avoided for 412-Precondition failures caused by retries.
 * <br/>
 * - Delete: retries can result in surfacing (more) 404-NotFound requests when a delete operation is retried and the
 * initial attempt succeeded. Ideally, write retries should only be enabled when applications can gracefully
 * handle 404 - Not Found.
 * <br/>
 * - Upsert: retries can result in surfacing a 200 - looking like the document was updated when actually the
 * document has been created by the initial attempt - so logically within the same operation. This will only
 * impact applications who have special casing for 201 vs. 200 for upsert operations.
 * <br/>
 * Patch: retries for patch can but will not always be idempotent - it completely depends on the patch operations
 * being executed and the precondition filters being used. Before enabling write retries for patch this needs
 * to be carefully reviewed and tests - which is why retries for patch can only be enabled on request options
 * - any CosmosClient wide configuration will be ignored.
 * <br/>
 * Bulk/Delete by PK/Transactional Batch/Stored Procedure execution: No automatic retries are supported.
 * @param options the options controlling whether non-idempotent write operations should be retried and whether
 * trackingIds can be used.
 * @return the CosmosItemRequestOptions
 */
public CosmosClientBuilder nonIdempotentWriteRetryOptions(NonIdempotentWriteRetryOptions options) {
checkNotNull(options, "Argument 'options' must not be null.");
// Map the public options onto the internal three-state WriteRetryPolicy.
if (options.isEnabled()) {
if (options.isTrackingIdUsed()) {
this.writeRetryPolicy = WriteRetryPolicy.WITH_TRACKING_ID;
} else {
this.writeRetryPolicy = WriteRetryPolicy.WITH_RETRIES;
}
} else {
this.writeRetryPolicy = WriteRetryPolicy.DISABLED;
}
return this;
}
// Returns the effective write retry policy resolved from options/config overrides.
WriteRetryPolicy getNonIdempotentWriteRetryPolicy()
{
return this.writeRetryPolicy;
}
/**
 * Resets the write retry policy from the JVM-wide configuration override.
 * Recognized values (case-insensitive): {@code NO_RETRIES}, {@code WITH_TRACKING_ID},
 * {@code WITH_RETRIES}; anything else — including no configuration — resolves to
 * {@code DISABLED}.
 */
void resetNonIdempotentWriteRetryPolicy()
{
    String configuredPolicy = Configs.getNonIdempotentWriteRetryPolicy();
    // Literal-first equalsIgnoreCase keeps this null-safe: a null or unrecognized
    // configuration value (including "NO_RETRIES") falls through to DISABLED.
    WriteRetryPolicy resolved = WriteRetryPolicy.DISABLED;
    if ("WITH_TRACKING_ID".equalsIgnoreCase(configuredPolicy)) {
        resolved = WriteRetryPolicy.WITH_TRACKING_ID;
    } else if ("WITH_RETRIES".equalsIgnoreCase(configuredPolicy)) {
        resolved = WriteRetryPolicy.WITH_RETRIES;
    }
    this.writeRetryPolicy = resolved;
}
/**
 * Resets the region-scoped session-capturing flag from the JVM-wide configuration.
 * An empty/absent configuration leaves the current flag untouched; the value
 * {@code REGION_SCOPED} (case-insensitive) enables it, any other value disables it.
 */
void resetSessionCapturingType() {
    String sessionCapturingType = Configs.getSessionCapturingType();
    if (StringUtils.isEmpty(sessionCapturingType)) {
        // No override configured — keep whatever was set on the builder.
        return;
    }
    boolean regionScoped = sessionCapturingType.equalsIgnoreCase("REGION_SCOPED");
    if (regionScoped) {
        logger.info("Session capturing type is set to REGION_SCOPED");
    } else {
        logger.info("Session capturing type is set to {} which is not a known session capturing type.", sessionCapturingType);
    }
    this.isRegionScopedSessionCapturingEnabled = regionScoped;
}
/**
 * Sets the {@link CosmosContainerProactiveInitConfig} which enables warming up of caches and connections
 * associated with the configured containers. Connections are opened against the first
 * <em>k</em> preferred regions, where <em>k</em> is the proactive connection region count
 * carried by the config.
 *
 * <p>
 * Use the {@link CosmosContainerProactiveInitConfigBuilder} class to instantiate the
 * {@link CosmosContainerProactiveInitConfig} class.
 * </p>
 * @param proactiveContainerInitConfig which encapsulates a list of container identities and no of
 * proactive connection regions
 * @return current CosmosClientBuilder
 * */
public CosmosClientBuilder openConnectionsAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
// Consistency with preferredRegions is enforced later, in validateConfig() during build.
this.proactiveContainerInitConfig = proactiveContainerInitConfig;
return this;
}
/**
 * Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} on the client.
 *
 * @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig){
// Stored as-is; passing null clears any previously configured policy.
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
return this;
}
/**
 * Sets the {@link SessionRetryOptions} instance on the client.
 * <p>
 * This setting helps in optimizing retry behavior associated with
 * {@code NOT_FOUND / READ_SESSION_NOT_AVAILABLE} or {@code 404 / 1002} scenarios, which happen
 * when the targeted consistency used by the request is <i>Session Consistency</i> and the
 * request goes to a region that does not yet have the recent data the request is looking for.
 * <p>
 * DISCLAIMER: Setting {@link SessionRetryOptions} will modify retry behavior
 * for all operations or workloads executed through this instance of the client.
 * <p>
 * For multi-write accounts, retrying on a different write region can improve availability:
 * another write region may have more up-to-date data, whether the original request targeted
 * a read region or the local write region.
 * <p>
 * For single-write accounts, the setting mainly helps read requests reach the write region
 * sooner; reads already targeting the write region, and writes to the single write region,
 * are unaffected because that region always has the most recent data.
 * <p>
 * About region switch hints: use the corresponding {@link CosmosRegionSwitchHint} value to
 * either prioritize retries in the local region or to move retries to a different / remote
 * region more quickly.
 * <p>
 * Operations supported: Read, Query, Create, Replace, Upsert, Delete, Patch, Batch and Bulk.
 *
 * @param sessionRetryOptions The {@link SessionRetryOptions} instance.
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder sessionRetryOptions(SessionRetryOptions sessionRetryOptions) {
this.sessionRetryOptions = sessionRetryOptions;
return this;
}
/**
 * Sets a {@link Supplier} which returns a {@link CosmosExcludedRegions} instance when invoked.
 * Requests will not be routed to the regions contained in the supplied
 * {@code CosmosExcludedRegions} for hedging scenarios and retry scenarios for the workload
 * executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @param excludedRegionsSupplier the supplier which returns a {@code CosmosExcludedRegions} instance.
 * @return current CosmosClientBuilder.
 * */
public CosmosClientBuilder excludedRegionsSupplier(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
this.cosmosExcludedRegionsSupplier = excludedRegionsSupplier;
return this;
}
/**
 * Gets the regions to exclude from the list of preferred regions. A request will not be
 * routed to these excluded regions for non-retry and retry scenarios
 * for the workload executed through this instance of {@link CosmosClient} / {@link CosmosAsyncClient}.
 *
 * @return the set of regions to exclude; an empty set when no supplier is configured
 *         or the supplier returns {@code null}.
 * */
Set<String> getExcludedRegions() {
    if (this.cosmosExcludedRegionsSupplier != null) {
        // Evaluate the supplier exactly once: invoking get() twice (once for the
        // null-check, once for the result) could observe two different
        // CosmosExcludedRegions instances if the supplier is not stable.
        CosmosExcludedRegions excludedRegions = this.cosmosExcludedRegionsSupplier.get();
        if (excludedRegions != null) {
            return excludedRegions.getExcludedRegions();
        }
    }
    return new HashSet<>();
}
/**
 * Gets the {@link SessionRetryOptions} configured on this builder.
 *
 * @return the session retry options.
 */
SessionRetryOptions getSessionRetryOptions() {
return this.sessionRetryOptions;
}
/**
 * Gets the {@link CosmosEndToEndOperationLatencyPolicyConfig}, as configured via
 * {@link #endToEndOperationLatencyPolicyConfig(CosmosEndToEndOperationLatencyPolicyConfig)}.
 *
 * @return the {@link CosmosEndToEndOperationLatencyPolicyConfig}
 */
CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationConfig() {
return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Gets the GATEWAY connection configuration to be used.
 * Note: when only a direct connection config was set, this stays unset until
 * {@code buildConnectionPolicy()} defaults it during build.
 *
 * @return gateway connection config
 */
GatewayConnectionConfig getGatewayConnectionConfig() {
return gatewayConnectionConfig;
}
/**
 * Gets the DIRECT connection configuration to be used.
 *
 * @return direct connection config
 */
DirectConnectionConfig getDirectConnectionConfig() {
return directConnectionConfig;
}
/**
 * Gets the value of the user-agent suffix appended to requests issued by this client.
 *
 * @return the value of user-agent suffix.
 */
String getUserAgentSuffix() {
return userAgentSuffix;
}
/**
 * Gets the throttling (429) retry policy options associated with the DocumentClient instance.
 *
 * @return the RetryOptions instance.
 */
ThrottlingRetryOptions getThrottlingRetryOptions() {
return throttlingRetryOptions;
}
/**
 * Gets the preferred regions for geo-replicated database accounts.
 *
 * @return the configured preferred regions, or an empty list when none were set.
 */
List<String> getPreferredRegions() {
    // Guard clause instead of a ternary: never hand callers a null list.
    if (preferredRegions == null) {
        return Collections.emptyList();
    }
    return preferredRegions;
}
/**
 * Gets the flag to enable endpoint discovery for geo-replicated database accounts.
 * Must be enabled when proactive container init spans more than one region
 * (enforced in {@code validateConfig()}).
 *
 * @return whether endpoint discovery is enabled.
 */
boolean isEndpointDiscoveryEnabled() {
return endpointDiscoveryEnabled;
}
/**
 * Gets the flag to enable writes on any region for geo-replicated database accounts in the Azure
 * Cosmos DB service.
 * <p>
 * When the value of this property is true, the SDK will direct write operations to
 * available writable regions of the geo-replicated database account. Writable regions
 * are ordered by the PreferredRegions property. Setting the property value
 * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount
 * is also set to true.
 * <p>
 * DEFAULT value is true, indicating that writes are directed to
 * available writable regions of the geo-replicated database account.
 *
 * @return flag to enable writes on any region for geo-replicated database accounts.
 */
boolean isMultipleWriteRegionsEnabled() {
return multipleWriteRegionsEnabled;
}
/**
 * Gets the effective flag for client telemetry.
 * Resolution order: builder-level override first, then the explicit value carried
 * by the telemetry config, then the library default.
 *
 * @return flag to enable client telemetry.
 */
boolean isClientTelemetryEnabled() {
Boolean explicitlySetInConfig = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig);
// Builder-level override (when set) wins over the config value.
if (this.clientTelemetryEnabledOverride != null) {
return this.clientTelemetryEnabledOverride;
}
if (explicitlySetInConfig != null) {
return explicitlySetInConfig;
}
return ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED;
}
/**
 * Gets whether to allow reads to go to multiple regions configured on an account of the Azure Cosmos DB service.
 * <p>
 * DEFAULT value is true.
 * <p>
 * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness;
 * the default is false for Bounded Staleness. Reads only fall back to another region when
 * endpoint discovery is enabled and the Azure Cosmos DB account has more than one region.
 *
 * @return flag to allow reads to go to multiple regions configured on an account of the Azure Cosmos DB service.
 */
boolean isReadRequestsFallbackEnabled() {
return readRequestsFallbackEnabled;
}
/**
 * Returns the client telemetry config instance for this builder.
 *
 * @return the client telemetry config instance for this builder
 */
CosmosClientTelemetryConfig getClientTelemetryConfig() {
return this.clientTelemetryConfig;
}
/**
 * Sets the client telemetry configuration to be used by this builder.
 *
 * @param telemetryConfig the client telemetry configuration to be used; must not be null
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder clientTelemetryConfig(CosmosClientTelemetryConfig telemetryConfig) {
ifThrowIllegalArgException(telemetryConfig == null,
"Parameter 'telemetryConfig' must not be null.");
Boolean explicitValueFromConfig = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(telemetryConfig);
if (explicitValueFromConfig != null) {
// The config carries its own explicit send-to-service flag; clear the
// builder-level override so the config's value takes effect
// (see the resolution order in isClientTelemetryEnabled()).
this.clientTelemetryEnabledOverride = null;
}
this.clientTelemetryConfig = telemetryConfig;
return this;
}
/**
 * Sets a custom serializer that should be used for conversion between POJOs and the Json payload stored in the
 * Cosmos DB service. The custom serializer can also be specified in request options. If defined both here and
 * in request options, the serializer defined in request options will be used.
 *
 * @param customItemSerializer the custom serializer to be used for item payload transformations
 * @return current CosmosClientBuilder
 */
public CosmosClientBuilder customItemSerializer(CosmosItemSerializer customItemSerializer) {
this.defaultCustomSerializer = customItemSerializer;
return this;
}
/**
 * Gets the custom item serializer configured on this builder.
 *
 * @return the custom serializer used for item payload transformations.
 */
CosmosItemSerializer getCustomItemSerializer() {
return this.defaultCustomSerializer;
}
/**
 * Builds a cosmos async client with the provided properties.
 *
 * @return CosmosAsyncClient
 */
public CosmosAsyncClient buildAsyncClient() {
// Delegate with startup-info logging enabled.
return buildAsyncClient(true);
}
/**
 * Builds a cosmos async client with the provided properties.
 *
 * @param logStartupInfo whether to emit the startup-configuration log line after the build
 * @return CosmosAsyncClient
 */
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
// Stopwatch measures total client start-up time for the startup log.
StopWatch stopwatch = new StopWatch();
stopwatch.start();
this.resetSessionCapturingType();
validateConfig();
buildConnectionPolicy();
CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
if (proactiveContainerInitConfig != null) {
cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
Duration aggressiveWarmupDuration = proactiveContainerInitConfig
.getAggressiveWarmupDuration();
// An aggressive warm-up duration bounds the blocking portion of the warm-up.
if (aggressiveWarmupDuration != null) {
cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
} else {
cosmosAsyncClient.openConnectionsAndInitCaches();
}
cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
} else {
// Completion is recorded with an empty list even without proactive init.
cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
}
if (logStartupInfo) {
logStartupInfo(stopwatch, cosmosAsyncClient);
}
return cosmosAsyncClient;
}
/**
 * Builds a cosmos sync client with the provided properties.
 *
 * @return CosmosClient
 */
public CosmosClient buildClient() {
    // Stopwatch measures total client start-up time for the startup log.
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosClient cosmosClient = new CosmosClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
            .getAggressiveWarmupDuration();
        // An aggressive warm-up duration bounds the blocking portion of the warm-up.
        if (aggressiveWarmupDuration != null) {
            cosmosClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosClient.openConnectionsAndInitCaches();
        }
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // Consistency with buildAsyncClient(boolean): record completion with an
        // empty container list when no proactive warm-up was requested.
        cosmosClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    logStartupInfo(stopwatch, cosmosClient.asyncClient());
    return cosmosClient;
}
/**
 * Builds the effective {@link ConnectionPolicy} from the configured direct/gateway
 * configs and copies the builder-level routing settings onto it.
 *
 * @return the effective connection policy.
 */
ConnectionPolicy buildConnectionPolicy() {
// Direct mode always carries a gateway config too (defaulted when absent).
if (this.directConnectionConfig != null) {
if (this.gatewayConnectionConfig == null) {
this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
}
this.connectionPolicy = new ConnectionPolicy(directConnectionConfig, gatewayConnectionConfig);
} else if (gatewayConnectionConfig != null) {
this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
}
// NOTE(review): if neither a direct nor a gateway config is set, connectionPolicy is
// not assigned above and the setters below would NPE — presumably the field is
// initialized with a default at construction; confirm against the field initializer.
this.connectionPolicy.setPreferredRegions(this.preferredRegions);
this.connectionPolicy.setExcludedRegionsSupplier(this.cosmosExcludedRegionsSupplier);
this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
return this.connectionPolicy;
}
/**
 * Validates the builder configuration before a client is constructed.
 * Fails fast with {@link IllegalArgumentException} for a missing/invalid endpoint,
 * invalid preferred regions, inconsistent proactive-init settings or missing credentials.
 */
private void validateConfig() {
    // Check the endpoint for null BEFORE parsing it: previously a null endpoint
    // reached `new URI(null)` and surfaced as a NullPointerException instead of
    // the descriptive IllegalArgumentException intended below.
    ifThrowIllegalArgException(this.serviceEndpoint == null,
        "cannot buildAsyncClient client without service endpoint");
    URI uri;
    try {
        uri = new URI(serviceEndpoint);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("invalid serviceEndpoint", e);
    }
    if (preferredRegions != null) {
        // Every preferred region must be non-blank and resolvable against the endpoint.
        preferredRegions.forEach(
            preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            }
        );
    }
    if (proactiveContainerInitConfig != null) {
        Preconditions.checkArgument(preferredRegions != null, "preferredRegions cannot be null when proactiveContainerInitConfig has been set");
        Preconditions.checkArgument(this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() <= this.preferredRegions.size(), "no. of regions to proactively connect to " +
            "cannot be greater than the no.of preferred regions");
        if (this.proactiveContainerInitConfig.getProactiveConnectionRegionsCount() > 1) {
            Preconditions.checkArgument(this.isEndpointDiscoveryEnabled(), "endpoint discovery should be enabled when no. " +
                "of proactive regions is greater than 1");
        }
    }
    // At least one credential mechanism must be present.
    ifThrowIllegalArgException(
        this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
            && this.credential == null && this.tokenCredential == null && this.cosmosAuthorizationTokenResolver == null,
        "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
            + "azure key credential");
    ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
        "cannot buildAsyncClient client without key credential");
}
/**
 * Gets the internal {@code Configs} instance used by this builder.
 *
 * @return the configs instance.
 */
Configs configs() {
return configs;
}
/**
 * Sets the internal {@code Configs} instance to be used by this builder.
 *
 * @param configs the configs instance to use
 * @return current cosmosClientBuilder
 */
CosmosClientBuilder configs(Configs configs) {
this.configs = configs;
return this;
}
/**
 * Throws an {@link IllegalArgumentException} carrying {@code error} when
 * {@code value} is {@code true}; otherwise does nothing.
 */
private void ifThrowIllegalArgException(boolean value, String error) {
    if (!value) {
        return;
    }
    throw new IllegalArgumentException(error);
}
/**
 * Logs a one-line summary of the built client's configuration and start-up time.
 * Emitted at WARN level (guarded by isWarnEnabled) so it shows up under typical
 * production log configurations.
 */
private void logStartupInfo(StopWatch stopwatch, CosmosAsyncClient client) {
stopwatch.stop();
if (logger.isWarnEnabled()) {
long time = stopwatch.getTime();
String diagnosticsCfg = "";
String tracingCfg = "";
if (client.getClientTelemetryConfig() != null) {
diagnosticsCfg = client.getClientTelemetryConfig().toString();
}
DiagnosticsProvider provider = client.getDiagnosticsProvider();
if (provider != null) {
tracingCfg = provider.getTraceConfigLog();
}
logger.warn("Cosmos Client with (Correlation) ID [{}] started up in [{}] ms with the following " +
"configuration: serviceEndpoint [{}], preferredRegions [{}], excludedRegions [{}], connectionPolicy [{}], " +
"consistencyLevel [{}], contentResponseOnWriteEnabled [{}], sessionCapturingOverride [{}], " +
"connectionSharingAcrossClients [{}], clientTelemetryEnabled [{}], proactiveContainerInit [{}], " +
"diagnostics [{}], tracing [{}], nativeTransport [{}] fastClientOpen [{}] isRegionScopedSessionCapturingEnabled [{}]",
client.getContextClient().getClientCorrelationId(), time, getEndpoint(), getPreferredRegions(), getExcludedRegions(),
getConnectionPolicy(), getConsistencyLevel(), isContentResponseOnWriteEnabled(),
isSessionCapturingOverrideEnabled(), isConnectionSharingAcrossClientsEnabled(),
isClientTelemetryEnabled(), getProactiveContainerInitConfig(), diagnosticsCfg,
tracingCfg, io.netty.channel.epoll.Epoll.isAvailable(),
io.netty.channel.epoll.Epoll.isTcpFastOpenClientSideAvailable(), isRegionScopedSessionCapturingEnabled());
}
}
/**
 * Registers the {@code CosmosClientBuilderAccessor} implementation with the
 * implementation bridge so that internal packages can reach package-private
 * builder state without widening its public API.
 */
static void initialize() {
CosmosClientBuilderHelper.setCosmosClientBuilderAccessor(
new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() {
@Override
public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
CosmosClientMetadataCachesSnapshot metadataCache) {
builder.metadataCaches(metadataCache);
}
@Override
public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) {
return builder.metadataCaches();
}
@Override
public void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType) {
builder.setApiType(apiType);
}
@Override
public ApiType getCosmosClientApiType(CosmosClientBuilder builder) {
return builder.apiType();
}
@Override
public ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder) {
return builder.getConnectionPolicy();
}
@Override
public ConnectionPolicy buildConnectionPolicy(CosmosClientBuilder builder) {
return builder.buildConnectionPolicy();
}
@Override
public Configs getConfigs(CosmosClientBuilder builder) {
return builder.configs();
}
@Override
public ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder) {
return builder.getConsistencyLevel();
}
@Override
public String getEndpoint(CosmosClientBuilder builder) {
return builder.getEndpoint();
}
@Override
public CosmosItemSerializer getDefaultCustomSerializer(CosmosClientBuilder builder) {
return builder.getCustomItemSerializer();
}
@Override
public void setRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder, boolean isRegionScopedSessionCapturingEnabled) {
builder.regionScopedSessionCapturingEnabled(isRegionScopedSessionCapturingEnabled);
}
@Override
public boolean getRegionScopedSessionCapturingEnabled(CosmosClientBuilder builder) {
return builder.isRegionScopedSessionCapturingEnabled();
}
});
}
// Register the accessor with the bridge helper as soon as this class loads.
static { initialize(); }
} |
I think this should be an exception not a null return. This is an invalid feature flag which we typically throw errors on or default to a false value. | public static ZonedDateTime convertStringToDate(String timeStr) {
if (!StringUtils.hasText(timeStr)) {
return null;
}
try {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.ISO_DATE_TIME);
} catch (final DateTimeParseException e) {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.RFC_1123_DATE_TIME);
}
} | return null; | public static ZonedDateTime convertStringToDate(String timeStr) {
if (!StringUtils.hasText(timeStr)) {
return null;
}
try {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.ISO_DATE_TIME);
} catch (final DateTimeParseException e) {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.RFC_1123_DATE_TIME);
}
} | class TimeWindowUtils {
/**
 * Number of days elapsed since the configured first day of the week.
 *
 * @param today the day of week being evaluated
 * @param firstDayOfWeek name of the day the week starts on
 * @return days passed since the start of the week, in the range [0, 6]
 */
public static int passingDaysOfWeek(DayOfWeek today, String firstDayOfWeek) {
    // floorMod folds a negative difference back into [0, DAYS_PER_WEEK),
    // replacing the explicit "+7 if negative" branch.
    return Math.floorMod(
        convertToWeekDayNumber(today) - convertToWeekDayNumber(firstDayOfWeek),
        RecurrenceConstants.DAYS_PER_WEEK);
}
/**
 * Converts an English day-of-week name (case-insensitive) to a week-day number
 * where Sunday is 0 and Saturday is 6.
 *
 * @param str the day name, e.g. "Monday"
 * @return the day number in [0, 6]
 */
public static int convertToWeekDayNumber(String str) {
    // Locale.ROOT keeps the upper-casing locale-independent: under a Turkish
    // default locale "i" upper-cases to a dotted 'İ', which would break
    // DayOfWeek.valueOf for names containing 'i' (e.g. "Friday").
    final String strUpperCase = str.toUpperCase(java.util.Locale.ROOT);
    // DayOfWeek values are MONDAY=1..SUNDAY=7; modulo 7 maps SUNDAY to 0.
    return DayOfWeek.valueOf(strUpperCase).getValue() % 7;
}
/**
 * Converts a {@link DayOfWeek} to a week-day number where Sunday is 0
 * and Saturday is 6.
 */
public static int convertToWeekDayNumber(DayOfWeek dateTime) {
    // ISO values run MONDAY=1..SUNDAY=7; reducing modulo 7 maps SUNDAY to 0.
    final int isoDayValue = dateTime.getValue();
    return isoDayValue % 7;
}
/**
 * Parses the given day-of-week names and sorts them relative to the configured
 * first day of the week.
 *
 * @param daysOfWeek day-of-week names (case-insensitive English names)
 * @param firstDayOfWeek the name of the day the week starts on
 * @return the parsed days ordered by their offset from {@code firstDayOfWeek}
 */
public static List<DayOfWeek> sortDaysOfWeek(List<String> daysOfWeek, String firstDayOfWeek) {
    final List<DayOfWeek> result = daysOfWeek.stream()
        // Locale.ROOT: locale-independent upper-casing — a Turkish default
        // locale would otherwise break DayOfWeek.valueOf (dotted 'İ').
        .map(str -> DayOfWeek.valueOf(str.toUpperCase(java.util.Locale.ROOT)))
        .collect(Collectors.toList());
    final int firstDayNum = TimeWindowUtils.convertToWeekDayNumber(firstDayOfWeek);
    Collections.sort(result, (a, b) -> {
        int aIndex = (TimeWindowUtils.convertToWeekDayNumber(a) - firstDayNum + RecurrenceConstants.DAYS_PER_WEEK) % 7;
        int bIndex = (TimeWindowUtils.convertToWeekDayNumber(b) - firstDayNum + RecurrenceConstants.DAYS_PER_WEEK) % 7;
        // Integer.compare over subtraction: same result for indices in 0..6,
        // but structurally immune to overflow.
        return Integer.compare(aIndex, bIndex);
    });
    return result;
}
} | class TimeWindowUtils {
/**
 * Calculates the offset in days between two given days of the week.
 * @param today DayOfWeek enum of today
 * @param firstDayOfWeek the start day of the week
 * @return the number of days passed, in the range [0, 6]
 * */
public static int getPassedWeekDays(DayOfWeek today, DayOfWeek firstDayOfWeek) {
// Adding DAYS_PER_WEEK keeps the numerator non-negative before the modulo.
return (today.getValue() - firstDayOfWeek.getValue() + RecurrenceConstants.DAYS_PER_WEEK) % 7;
}
/**
 * Sorts the given days relative to the configured first day of the week.
 * The input list is not modified; a sorted copy is returned.
 */
public static List<DayOfWeek> sortDaysOfWeek(List<DayOfWeek> daysOfWeek, DayOfWeek firstDayOfWeek) {
final List<DayOfWeek> result = new ArrayList<>(daysOfWeek);
// Order by each day's offset from the first day of the week.
Collections.sort(result, Comparator.comparingInt(a -> getPassedWeekDays(a, firstDayOfWeek)));
return result;
}
} |
I tried to resolve all invalid cases in the `RecurrenceValidator`, so I returned `null` here and throw exception in `RecurrenceValidator` when start is null. I agreed it's better that we just throw exception here. I'll try this way. | public static ZonedDateTime convertStringToDate(String timeStr) {
if (!StringUtils.hasText(timeStr)) {
return null;
}
try {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.ISO_DATE_TIME);
} catch (final DateTimeParseException e) {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.RFC_1123_DATE_TIME);
}
} | return null; | public static ZonedDateTime convertStringToDate(String timeStr) {
if (!StringUtils.hasText(timeStr)) {
return null;
}
try {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.ISO_DATE_TIME);
} catch (final DateTimeParseException e) {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.RFC_1123_DATE_TIME);
}
} | class TimeWindowUtils {
public static int passingDaysOfWeek(DayOfWeek today, String firstDayOfWeek) {
int remainingDays = (convertToWeekDayNumber(today) - convertToWeekDayNumber(firstDayOfWeek));
if (remainingDays < 0) {
return remainingDays + RecurrenceConstants.DAYS_PER_WEEK;
} else {
return remainingDays;
}
}
/**
 * Converts an English day-of-week name (case-insensitive) to a week-day number
 * where Sunday is 0 and Saturday is 6.
 *
 * @param str the day name, e.g. "Monday"
 * @return the day number in [0, 6]
 */
public static int convertToWeekDayNumber(String str) {
    // Locale.ROOT keeps the upper-casing locale-independent: under a Turkish
    // default locale "i" upper-cases to a dotted 'İ', which would break
    // DayOfWeek.valueOf for names containing 'i' (e.g. "Friday").
    final String strUpperCase = str.toUpperCase(java.util.Locale.ROOT);
    // DayOfWeek values are MONDAY=1..SUNDAY=7; modulo 7 maps SUNDAY to 0.
    return DayOfWeek.valueOf(strUpperCase).getValue() % 7;
}
public static int convertToWeekDayNumber(DayOfWeek dateTime) {
return dateTime.getValue() % 7;
}
/**
 * Parses the given day-of-week names and sorts them relative to the configured
 * first day of the week.
 *
 * @param daysOfWeek day-of-week names (case-insensitive English names)
 * @param firstDayOfWeek the name of the day the week starts on
 * @return the parsed days ordered by their offset from {@code firstDayOfWeek}
 */
public static List<DayOfWeek> sortDaysOfWeek(List<String> daysOfWeek, String firstDayOfWeek) {
    final List<DayOfWeek> result = daysOfWeek.stream()
        // Locale.ROOT: locale-independent upper-casing — a Turkish default
        // locale would otherwise break DayOfWeek.valueOf (dotted 'İ').
        .map(str -> DayOfWeek.valueOf(str.toUpperCase(java.util.Locale.ROOT)))
        .collect(Collectors.toList());
    final int firstDayNum = TimeWindowUtils.convertToWeekDayNumber(firstDayOfWeek);
    Collections.sort(result, (a, b) -> {
        int aIndex = (TimeWindowUtils.convertToWeekDayNumber(a) - firstDayNum + RecurrenceConstants.DAYS_PER_WEEK) % 7;
        int bIndex = (TimeWindowUtils.convertToWeekDayNumber(b) - firstDayNum + RecurrenceConstants.DAYS_PER_WEEK) % 7;
        // Integer.compare over subtraction: same result for indices in 0..6,
        // but structurally immune to overflow.
        return Integer.compare(aIndex, bIndex);
    });
    return result;
}
} | class TimeWindowUtils {
/**
* Calculates the offset in days between two given days of the week.
* @param today DayOfWeek enum of today
* @param firstDayOfWeek the start day of the week
* @return the number of days passed
* */
public static int getPassedWeekDays(DayOfWeek today, DayOfWeek firstDayOfWeek) {
return (today.getValue() - firstDayOfWeek.getValue() + RecurrenceConstants.DAYS_PER_WEEK) % 7;
}
public static List<DayOfWeek> sortDaysOfWeek(List<DayOfWeek> daysOfWeek, DayOfWeek firstDayOfWeek) {
final List<DayOfWeek> result = new ArrayList<>(daysOfWeek);
Collections.sort(result, Comparator.comparingInt(a -> getPassedWeekDays(a, firstDayOfWeek)));
return result;
}
} |
I know that we've already got this in `getPreviousOccurrence` method ``` java if (now.isBefore(start)) { return emptyOccurrence; } ``` Can we also set a gate in this method to prevent now < start which will cause negative numberOfOccurrences | private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
} | final ZonedDateTime start = settings.getStart(); | private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
} | class RecurrenceEvaluator {
/**
 * Checks if a provided timestamp is within any recurring time window specified
 * by the Recurrence section in the time window filter settings.
 * @return True if the time stamp is within any recurring time window, false otherwise.
 */
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
// Invalid recurrence settings never match.
final RecurrenceValidator validator = new RecurrenceValidator(settings);
if (!validator.validateSettings()) {
return false;
}
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now).previousOccurrence;
if (previousOccurrence == null) {
return false;
}
// The window length (end - start) is applied to the most recent occurrence;
// "now" matches while it falls before that occurrence's end.
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
 * Find the most recent recurrence occurrence before the provided time stamp.
 *
 * @return The closest previous occurrence; an empty {@code OccurrenceInfo}
 *         (null occurrence) when "now" precedes the start or the recurrence
 *         range has already ended.
 */
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
OccurrenceInfo emptyOccurrence = new OccurrenceInfo();
// Nothing has occurred yet before the configured start.
if (now.isBefore(start)) {
return emptyOccurrence;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
// Filter the candidate against the recurrence range: an END_DATE range
// rejects occurrences after the end date, a NUMBERED range rejects
// occurrences beyond the allowed count.
final RecurrenceRange range = settings.getRecurrence().getRange();
if (range.getType() == RecurrenceRangeType.END_DATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return emptyOccurrence;
}
if (range.getType() == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
return emptyOccurrence;
}
return occurrenceInfo;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
// Anchor: the first day of the week containing the recurrence start.
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
// How many full (interval * 7-day) periods fit between the anchor and "now".
// NOTE(review): integer division of second counts assumes no DST-induced
// week-length drift matters here — confirm for zones with DST transitions.
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
// Recurring days ordered by offset from the configured first day of week.
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
// Occurrences in whole elapsed periods, minus the recurring days that fall
// before the configured start within the first week.
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
// "now" is past the recurring week of the current period: every recurring
// day of that week occurred; the last one is the most recent occurrence.
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
// Still inside the first period: counting restarts from the configured start.
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
// No recurring day reached yet this period: fall back to the last
// recurring day of the previous period.
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
// Walk the recurring days of the current week up to "now".
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
/**
 * Value pair returned by the per-pattern lookups: the closest previous
 * occurrence plus the number of occurrences counted so far.
 */
private static class OccurrenceInfo {
// Most recent occurrence at or before the evaluated time; null when none.
private final ZonedDateTime previousOccurrence;
// Number of occurrences between the recurrence start and that time.
private final int numberOfOccurrences;
OccurrenceInfo() {
this.previousOccurrence = null;
this.numberOfOccurrences = 0;
}
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
RecurrenceValidator.validateSettings(settings);
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now);
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
if (now.isBefore(start)) {
return null;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
final RecurrenceRangeType rangeType = range.getType();
if (rangeType == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return null;
}
if (rangeType == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfOccurrences()) {
return null;
}
return occurrenceInfo.previousOccurrence;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} |
Can we have a variable for settings.getStart() instead of calling it repeatedly? Just like what you did for `interval` and `pattern`. It will make the code look more consistent. ``` java ZonedDateTime start = settings.getStart(); ``` | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays( | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrenceValidator validator = new RecurrenceValidator(settings);
if (!validator.validateSettings()) {
return false;
}
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now).previousOccurrence;
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
OccurrenceInfo emptyOccurrence = new OccurrenceInfo();
if (now.isBefore(start)) {
return emptyOccurrence;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
if (range.getType() == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return emptyOccurrence;
}
if (range.getType() == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
return emptyOccurrence;
}
return occurrenceInfo;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo() {
this.previousOccurrence = null;
this.numberOfOccurrences = 0;
}
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
RecurrenceValidator.validateSettings(settings);
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now);
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
if (now.isBefore(start)) {
return null;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
final RecurrenceRangeType rangeType = range.getType();
if (rangeType == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return null;
}
if (rangeType == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfOccurrences()) {
return null;
}
return occurrenceInfo.previousOccurrence;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} |
I propose the variable name: firstDayOfMostRecentOccurringWeek or firstDayOfMostRecentWeekWithOcurrence | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays( | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrenceValidator validator = new RecurrenceValidator(settings);
if (!validator.validateSettings()) {
return false;
}
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now).previousOccurrence;
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
OccurrenceInfo emptyOccurrence = new OccurrenceInfo();
if (now.isBefore(start)) {
return emptyOccurrence;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
if (range.getType() == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return emptyOccurrence;
}
if (range.getType() == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
return emptyOccurrence;
}
return occurrenceInfo;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo() {
this.previousOccurrence = null;
this.numberOfOccurrences = 0;
}
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
RecurrenceValidator.validateSettings(settings);
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now);
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
if (now.isBefore(start)) {
return null;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
final RecurrenceRangeType rangeType = range.getType();
if (rangeType == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return null;
}
if (rangeType == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfOccurrences()) {
return null;
}
return occurrenceInfo.previousOccurrence;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} |
> // now is after the first week this comment is not correct Suggest: "now is not within the most recent occurring week" | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrenceValidator validator = new RecurrenceValidator(settings);
if (!validator.validateSettings()) {
return false;
}
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now).previousOccurrence;
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
OccurrenceInfo emptyOccurrence = new OccurrenceInfo();
if (now.isBefore(start)) {
return emptyOccurrence;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
if (range.getType() == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return emptyOccurrence;
}
if (range.getType() == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
return emptyOccurrence;
}
return occurrenceInfo;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo() {
this.previousOccurrence = null;
this.numberOfOccurrences = 0;
}
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
RecurrenceValidator.validateSettings(settings);
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now);
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
if (now.isBefore(start)) {
return null;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
final RecurrenceRangeType rangeType = range.getType();
if (rangeType == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return null;
}
if (rangeType == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfOccurrences()) {
return null;
}
return occurrenceInfo.previousOccurrence;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | |
could you move the comment to next line please? This comment does not explain the if condition The comment could be "move to the last occurrence in the previous occurring week" | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | if (now.isBefore(dayWithMinOffset)) { | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrenceValidator validator = new RecurrenceValidator(settings);
if (!validator.validateSettings()) {
return false;
}
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now).previousOccurrence;
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
OccurrenceInfo emptyOccurrence = new OccurrenceInfo();
if (now.isBefore(start)) {
return emptyOccurrence;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
if (range.getType() == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return emptyOccurrence;
}
if (range.getType() == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
return emptyOccurrence;
}
return occurrenceInfo;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo() {
this.previousOccurrence = null;
this.numberOfOccurrences = 0;
}
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
RecurrenceValidator.validateSettings(settings);
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now);
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
if (now.isBefore(start)) {
return null;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
final RecurrenceRangeType rangeType = range.getType();
if (rangeType == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return null;
}
if (rangeType == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfOccurrences()) {
return null;
}
return occurrenceInfo.previousOccurrence;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} |
I don't think we need this check here. `getDailyPreviousOccurrence` is only called when now is after start. | private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
} | final ZonedDateTime start = settings.getStart(); | private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrenceValidator validator = new RecurrenceValidator(settings);
if (!validator.validateSettings()) {
return false;
}
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now).previousOccurrence;
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
OccurrenceInfo emptyOccurrence = new OccurrenceInfo();
if (now.isBefore(start)) {
return emptyOccurrence;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
if (range.getType() == RecurrenceRangeType.END_DATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return emptyOccurrence;
}
if (range.getType() == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
return emptyOccurrence;
}
return occurrenceInfo;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo() {
this.previousOccurrence = null;
this.numberOfOccurrences = 0;
}
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
RecurrenceValidator.validateSettings(settings);
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now);
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
if (now.isBefore(start)) {
return null;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
final RecurrenceRangeType rangeType = range.getType();
if (rangeType == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return null;
}
if (rangeType == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfOccurrences()) {
return null;
}
return occurrenceInfo.previousOccurrence;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
/**
 * Immutable result holder pairing the closest previous occurrence with the
 * number of occurrences counted up to it. Pure data carrier; no behavior.
 */
private static class OccurrenceInfo {
// The closest occurrence at or before the evaluated timestamp.
private final ZonedDateTime previousOccurrence;
// Count of occurrences between the recurrence start and previousOccurrence.
private final int numberOfOccurrences;
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} |
Updated | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
// Interior of getWeeklyPreviousOccurrence: walks the "Weekly" recurrence pattern
// backwards from `now` to find the closest previous occurrence and its ordinal.
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
// Align the recurrence start back to the first day of its week.
// NOTE(review): daysPassedWeekStart presumably returns days elapsed since the
// configured first day of the week — confirm against TimeWindowUtils.
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
// Whole (interval * DAYS_PER_WEEK)-day periods elapsed between the first week and `now`.
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
// First day of the most recent occurring week at or before `now`.
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
// Recurring weekdays ordered starting from the configured first day of the week.
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
// Day offsets (from the week start) of the last and first recurring weekdays.
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
// Occurrences contributed by all fully elapsed periods, minus the recurring
// days that precede the start day inside the very first week.
// NOTE(review): indexOf yields -1 if the start day is not a recurring day —
// confirm the validator guarantees it is present.
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
// `now` is past the entire most recent occurring week: all of that week's
// recurring days occurred, and the last of them is the closest occurrence.
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
// Earliest candidate occurrence within the most recent occurring week.
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
// The recurrence starts mid-week: nothing can have occurred before the start.
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
// `now` precedes this week's first occurrence; fall back to the last
// occurrence of the previous occurring week.
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
// Step through this week's recurring days, stopping at the last one <= `now`.
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays( | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
// Interior of getWeeklyPreviousOccurrence (renamed variant): finds the closest
// previous "Weekly" occurrence before `now` and how many occurrences preceded it.
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
// Align the recurrence start back to the first day of its week.
// NOTE(review): getPassedWeekDays presumably counts days since the configured
// first day of the week — confirm against TimeWindowUtils.
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
// Whole (interval * DAYS_PER_WEEK)-day periods elapsed up to `now`.
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
// First day of the most recent occurring week at or before `now`.
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
// Recurring weekdays ordered starting from the configured first day of the week.
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
// Day offsets (from the week start) of the last and first recurring weekdays.
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
// Occurrences from all fully elapsed periods, minus the recurring days that
// precede the start day within the first week.
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
// `now` is past the entire most recent occurring week: every recurring day of
// that week occurred; the last of them is the closest occurrence.
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
// Earliest candidate occurrence within the most recent occurring week.
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
// Recurrence starts mid-week: nothing occurred before the start itself.
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
// `now` precedes this week's first occurrence; use the last occurrence of
// the previous occurring week.
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
// Step through this week's recurring days, stopping at the last one <= `now`.
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
/**
 * Checks whether {@code now} falls inside any recurring time window described by
 * the Recurrence section of the time window filter settings.
 *
 * @param settings the time window filter settings holding the recurrence definition
 * @param now the timestamp to evaluate
 * @return {@code true} if {@code now} is within a recurring time window, {@code false} otherwise
 */
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
    // Settings that fail recurrence validation can never match.
    if (!new RecurrenceValidator(settings).validateSettings()) {
        return false;
    }
    final ZonedDateTime previous = getPreviousOccurrence(settings, now).previousOccurrence;
    if (previous == null) {
        // Nothing has occurred yet (or the recurrence range is exhausted).
        return false;
    }
    // Each occurrence stays active for as long as the configured Start..End window.
    final Duration windowLength = Duration.between(settings.getStart(), settings.getEnd());
    return now.isBefore(previous.plus(windowLength));
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
/**
 * Finds the most recent recurrence occurrence at or before the provided timestamp.
 *
 * @param settings the time window filter settings holding the recurrence definition
 * @param now the timestamp to evaluate
 * @return an {@code OccurrenceInfo} whose {@code previousOccurrence} is the closest
 * previous occurrence, or an empty {@code OccurrenceInfo} (null occurrence, zero count)
 * when the recurrence has not started or its range (end date / numbered) is exceeded.
 */
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    final ZonedDateTime start = settings.getStart();
    if (now.isBefore(start)) {
        // The recurrence has not started yet.
        return new OccurrenceInfo();
    }
    // Dispatch on the pattern type; anything that is not DAILY is treated as WEEKLY.
    final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
    final OccurrenceInfo occurrenceInfo = patternType == RecurrencePatternType.DAILY
        ? getDailyPreviousOccurrence(settings, now)
        : getWeeklyPreviousOccurrence(settings, now);
    final RecurrenceRange range = settings.getRecurrence().getRange();
    // Hoisted once: the range type is consulted by both checks below (and this
    // matches the sibling implementation's style).
    final RecurrenceRangeType rangeType = range.getType();
    if (rangeType == RecurrenceRangeType.ENDDATE
        && occurrenceInfo.previousOccurrence != null
        && occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
        // The computed occurrence falls after the configured end date.
        return new OccurrenceInfo();
    }
    if (rangeType == RecurrenceRangeType.NUMBERED
        && occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
        // The occurrence count exceeds the configured maximum.
        return new OccurrenceInfo();
    }
    return occurrenceInfo;
}
/**
* Finds the closest previous recurrence occurrence before the provided timestamp according to the "Daily" recurrence pattern.
*
* @return A result with two properties: previousOccurrence, the closest previous occurrence,
* and numberOfOccurrences, the number of complete recurrence intervals that have occurred between the timestamp and the recurrence start.
*/
/**
 * Computes the closest previous occurrence for a "Daily" recurrence pattern.
 *
 * @param settings the time window filter settings holding the recurrence definition
 * @param now the timestamp to evaluate (must not precede the recurrence start)
 * @return the closest previous occurrence together with its 1-based ordinal
 */
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    final ZonedDateTime start = settings.getStart();
    final int interval = settings.getRecurrence().getPattern().getInterval();
    // Whole recurrence intervals elapsed since the start, measured in seconds.
    final long elapsedSeconds = Duration.between(start, now).getSeconds();
    final long intervalSeconds = Duration.ofDays(interval).getSeconds();
    final int completedIntervals = (int) (elapsedSeconds / intervalSeconds);
    // The count includes the occurrence that opened the current interval.
    return new OccurrenceInfo(start.plusDays((long) completedIntervals * interval), completedIntervals + 1);
}
/**
* Finds the closest previous recurrence occurrence before the provided timestamp according to the "Weekly" recurrence pattern.
*
* @return A result with two properties: previousOccurrence, the closest previous occurrence,
* and numberOfOccurrences, the number of recurring days of the week that have occurred between the timestamp and the recurrence start.
*/
/**
 * Immutable result holder pairing the closest previous occurrence with the
 * number of occurrences counted up to it. Pure data carrier; no behavior.
 */
private static class OccurrenceInfo {
// The closest occurrence at or before the evaluated timestamp; null when none exists.
private final ZonedDateTime previousOccurrence;
// Count of occurrences between the recurrence start and previousOccurrence.
private final int numberOfOccurrences;
// "Empty" result: no occurrence found (recurrence not started or range exceeded).
OccurrenceInfo() {
this.previousOccurrence = null;
this.numberOfOccurrences = 0;
}
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
/**
 * Checks whether {@code now} falls inside any recurring time window described by
 * the Recurrence section of the time window filter settings.
 *
 * @param settings the time window filter settings holding the recurrence definition
 * @param now the timestamp to evaluate
 * @return {@code true} if {@code now} is within a recurring time window, {@code false} otherwise
 */
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
    // NOTE(review): presumably rejects malformed recurrence settings — confirm
    // validateSettings' failure behavior against RecurrenceValidator.
    RecurrenceValidator.validateSettings(settings);
    final ZonedDateTime previous = getPreviousOccurrence(settings, now);
    if (previous == null) {
        // Nothing has occurred yet (or the recurrence range is exhausted).
        return false;
    }
    // Each occurrence stays active for as long as the configured Start..End window.
    final Duration windowLength = Duration.between(settings.getStart(), settings.getEnd());
    return now.isBefore(previous.plus(windowLength));
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
/**
 * Finds the most recent recurrence occurrence at or before the provided timestamp.
 *
 * @param settings the time window filter settings holding the recurrence definition
 * @param now the timestamp to evaluate
 * @return the closest previous occurrence, or {@code null} when the recurrence has
 * not started or its range (end date / numbered) is exceeded
 */
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    if (now.isBefore(settings.getStart())) {
        // The recurrence has not started yet.
        return null;
    }
    // Dispatch on the pattern type; anything that is not DAILY is treated as WEEKLY.
    final boolean isDaily = settings.getRecurrence().getPattern().getType() == RecurrencePatternType.DAILY;
    final OccurrenceInfo occurrence = isDaily
        ? getDailyPreviousOccurrence(settings, now)
        : getWeeklyPreviousOccurrence(settings, now);
    final RecurrenceRange range = settings.getRecurrence().getRange();
    final RecurrenceRangeType rangeType = range.getType();
    if (rangeType == RecurrenceRangeType.ENDDATE
        && occurrence.previousOccurrence != null
        && occurrence.previousOccurrence.isAfter(range.getEndDate())) {
        // The computed occurrence falls after the configured end date.
        return null;
    }
    if (rangeType == RecurrenceRangeType.NUMBERED
        && occurrence.numberOfOccurrences > range.getNumberOfOccurrences()) {
        // The occurrence count exceeds the configured maximum.
        return null;
    }
    return occurrence.previousOccurrence;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
/**
 * Computes the closest previous occurrence for a "Daily" recurrence pattern.
 *
 * @param settings the time window filter settings holding the recurrence definition
 * @param now the timestamp to evaluate (must not precede the recurrence start)
 * @return the closest previous occurrence together with its 1-based ordinal
 */
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    final ZonedDateTime start = settings.getStart();
    final int interval = settings.getRecurrence().getPattern().getInterval();
    // Seconds per recurrence interval and seconds elapsed since the start.
    final long secondsPerInterval = Duration.ofDays(interval).getSeconds();
    final long secondsElapsed = Duration.between(start, now).getSeconds();
    final int wholeIntervals = (int) (secondsElapsed / secondsPerInterval);
    // The count includes the occurrence that opened the current interval.
    return new OccurrenceInfo(start.plusDays((long) wholeIntervals * interval), wholeIntervals + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
/**
 * Immutable result holder pairing the closest previous occurrence with the
 * number of occurrences counted up to it. Pure data carrier; no behavior.
 */
private static class OccurrenceInfo {
// The closest occurrence at or before the evaluated timestamp.
private final ZonedDateTime previousOccurrence;
// Count of occurrences between the recurrence start and previousOccurrence.
private final int numberOfOccurrences;
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} |
Updated to `firstDayOfMostRecentOccurringWeek ` | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays( | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrenceValidator validator = new RecurrenceValidator(settings);
if (!validator.validateSettings()) {
return false;
}
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now).previousOccurrence;
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
OccurrenceInfo emptyOccurrence = new OccurrenceInfo();
if (now.isBefore(start)) {
return emptyOccurrence;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
if (range.getType() == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return emptyOccurrence;
}
if (range.getType() == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
return emptyOccurrence;
}
return occurrenceInfo;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo() {
this.previousOccurrence = null;
this.numberOfOccurrences = 0;
}
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
RecurrenceValidator.validateSettings(settings);
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now);
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
if (now.isBefore(start)) {
return null;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
final RecurrenceRangeType rangeType = range.getType();
if (rangeType == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return null;
}
if (rangeType == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfOccurrences()) {
return null;
}
return occurrenceInfo.previousOccurrence;
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Daily" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of complete recurrence intervals which have occurred between the time and the recurrence start.
*/
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final ZonedDateTime start = settings.getStart();
final int interval = settings.getRecurrence().getPattern().getInterval();
final int numberOfOccurrences = (int) (Duration.between(start, now).getSeconds() / Duration.ofDays(interval).getSeconds());
return new OccurrenceInfo(start.plusDays((long) numberOfOccurrences * interval), numberOfOccurrences + 1);
}
/**
* Find the closest previous recurrence occurrence before the provided time stamp according to the "Weekly" recurrence pattern.
*
* @return The return result contains two property, one is previousOccurrence, the other is numberOfOccurrences.
* previousOccurrence: The closest previous occurrence.
* numberOfOccurrences: The number of recurring days of week which have occurred between the time and the recurrence start.
*/
private static class OccurrenceInfo {
private final ZonedDateTime previousOccurrence;
private final int numberOfOccurrences;
OccurrenceInfo(ZonedDateTime dateTime, int num) {
this.previousOccurrence = dateTime;
this.numberOfOccurrences = num;
}
}
} |
Updated | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | class RecurrenceEvaluator {
/**
* Checks if a provided timestamp is within any recurring time window specified
* by the Recurrence section in the time window filter settings.
* @return True if the time stamp is within any recurring time window, false otherwise.
*/
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrenceValidator validator = new RecurrenceValidator(settings);
if (!validator.validateSettings()) {
return false;
}
final ZonedDateTime previousOccurrence = getPreviousOccurrence(settings, now).previousOccurrence;
if (previousOccurrence == null) {
return false;
}
final ZonedDateTime occurrenceEndDate = previousOccurrence.plus(
Duration.between(settings.getStart(), settings.getEnd()));
return now.isBefore(occurrenceEndDate);
}
/**
* Find the most recent recurrence occurrence before the provided time stamp.
*
* @return The closest previous occurrence.
*/
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
ZonedDateTime start = settings.getStart();
OccurrenceInfo emptyOccurrence = new OccurrenceInfo();
if (now.isBefore(start)) {
return emptyOccurrence;
}
final RecurrencePatternType patternType = settings.getRecurrence().getPattern().getType();
OccurrenceInfo occurrenceInfo;
if (patternType == RecurrencePatternType.DAILY) {
occurrenceInfo = getDailyPreviousOccurrence(settings, now);
} else {
occurrenceInfo = getWeeklyPreviousOccurrence(settings, now);
}
final RecurrenceRange range = settings.getRecurrence().getRange();
if (range.getType() == RecurrenceRangeType.ENDDATE
&& occurrenceInfo.previousOccurrence != null
&& occurrenceInfo.previousOccurrence.isAfter(range.getEndDate())) {
return emptyOccurrence;
}
if (range.getType() == RecurrenceRangeType.NUMBERED
&& occurrenceInfo.numberOfOccurrences > range.getNumberOfRecurrences()) {
return emptyOccurrence;
}
return occurrenceInfo;
}
/**
 * Finds the closest previous occurrence before {@code now} for the "Daily" pattern.
 *
 * @return an OccurrenceInfo holding the closest previous occurrence and the number
 * of occurrences (complete intervals since the start, counting the returned one).
 */
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    final ZonedDateTime start = settings.getStart();
    final int interval = settings.getRecurrence().getPattern().getInterval();
    // Whole recurrence intervals elapsed since the recurrence start.
    final long elapsedSeconds = Duration.between(start, now).getSeconds();
    final long intervalSeconds = Duration.ofDays(interval).getSeconds();
    final int completedIntervals = (int) (elapsedSeconds / intervalSeconds);
    return new OccurrenceInfo(start.plusDays((long) completedIntervals * interval), completedIntervals + 1);
}
/**
 * Result holder for a previous-occurrence lookup: the closest previous occurrence
 * (null when none exists) and how many occurrences have happened up to it.
 */
private static class OccurrenceInfo {
    private final ZonedDateTime previousOccurrence;
    private final int numberOfOccurrences;

    // "Empty" result: no previous occurrence.
    OccurrenceInfo() {
        this(null, 0);
    }

    OccurrenceInfo(ZonedDateTime occurrence, int count) {
        this.previousOccurrence = occurrence;
        this.numberOfOccurrences = count;
    }
}
} | class RecurrenceEvaluator {
/**
 * Checks if a provided timestamp is within any recurring time window specified
 * by the Recurrence section in the time window filter settings.
 *
 * @return true if the timestamp is within any recurring time window, false otherwise.
 */
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
    // Throws when the recurrence settings are invalid.
    RecurrenceValidator.validateSettings(settings);
    final ZonedDateTime occurrenceStart = getPreviousOccurrence(settings, now);
    if (occurrenceStart == null) {
        // No valid occurrence at or before "now".
        return false;
    }
    // Every occurrence lasts as long as the configured start-to-end window.
    final Duration windowLength = Duration.between(settings.getStart(), settings.getEnd());
    return now.isBefore(occurrenceStart.plus(windowLength));
}
/**
 * Finds the most recent recurrence occurrence at or before the provided timestamp.
 *
 * @return the closest previous occurrence, or null when none is valid.
 */
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    if (now.isBefore(settings.getStart())) {
        // The recurrence has not started yet.
        return null;
    }
    final OccurrenceInfo candidate =
        settings.getRecurrence().getPattern().getType() == RecurrencePatternType.DAILY
            ? getDailyPreviousOccurrence(settings, now)
            : getWeeklyPreviousOccurrence(settings, now);
    final RecurrenceRange range = settings.getRecurrence().getRange();
    final RecurrenceRangeType rangeType = range.getType();
    // An "EndDate" range invalidates occurrences after the configured end date.
    if (rangeType == RecurrenceRangeType.ENDDATE
        && candidate.previousOccurrence != null
        && candidate.previousOccurrence.isAfter(range.getEndDate())) {
        return null;
    }
    // A "Numbered" range caps how many occurrences may fire.
    if (rangeType == RecurrenceRangeType.NUMBERED
        && candidate.numberOfOccurrences > range.getNumberOfOccurrences()) {
        return null;
    }
    return candidate.previousOccurrence;
}
/**
 * Finds the closest previous occurrence before {@code now} for the "Daily" pattern.
 *
 * @return an OccurrenceInfo holding the closest previous occurrence and the number
 * of occurrences (complete intervals since the start, counting the returned one).
 */
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    final ZonedDateTime start = settings.getStart();
    final int interval = settings.getRecurrence().getPattern().getInterval();
    // Whole recurrence intervals elapsed since the recurrence start.
    final long elapsedSeconds = Duration.between(start, now).getSeconds();
    final long intervalSeconds = Duration.ofDays(interval).getSeconds();
    final int completedIntervals = (int) (elapsedSeconds / intervalSeconds);
    return new OccurrenceInfo(start.plusDays((long) completedIntervals * interval), completedIntervals + 1);
}
/**
 * Result holder for a previous-occurrence lookup: the closest previous occurrence
 * and how many occurrences have happened up to it.
 */
private static class OccurrenceInfo {
    private final ZonedDateTime previousOccurrence;
    private final int numberOfOccurrences;

    OccurrenceInfo(ZonedDateTime occurrence, int count) {
        this.previousOccurrence = occurrence;
        this.numberOfOccurrences = count;
    }
}
} | |
Updated | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime firstDayOfFirstWeek = settings.getStart().minusDays(
TimeWindowUtils.daysPassedWeekStart(settings.getStart().getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurrence = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence = null;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(settings.getStart().getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurrence.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurrence.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(minDayOffset);
if (settings.getStart().isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = settings.getStart();
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurrence.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurrence.plusDays(
TimeWindowUtils.daysPassedWeekStart(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | if (now.isBefore(dayWithMinOffset)) { | private static OccurrenceInfo getWeeklyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
final RecurrencePattern pattern = settings.getRecurrence().getPattern();
final int interval = pattern.getInterval();
final ZonedDateTime start = settings.getStart();
final ZonedDateTime firstDayOfFirstWeek = start.minusDays(
TimeWindowUtils.getPassedWeekDays(start.getDayOfWeek(), pattern.getFirstDayOfWeek()));
final long numberOfInterval = Duration.between(firstDayOfFirstWeek, now).toSeconds()
/ Duration.ofDays((long) interval * RecurrenceConstants.DAYS_PER_WEEK).toSeconds();
final ZonedDateTime firstDayOfMostRecentOccurringWeek = firstDayOfFirstWeek.plusDays(
numberOfInterval * (interval * RecurrenceConstants.DAYS_PER_WEEK));
final List<DayOfWeek> sortedDaysOfWeek = TimeWindowUtils.sortDaysOfWeek(pattern.getDaysOfWeek(), pattern.getFirstDayOfWeek());
final int maxDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(sortedDaysOfWeek.size() - 1), pattern.getFirstDayOfWeek());
final int minDayOffset = TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(0), pattern.getFirstDayOfWeek());
ZonedDateTime mostRecentOccurrence;
int numberOfOccurrences = (int) (numberOfInterval * sortedDaysOfWeek.size()
- (sortedDaysOfWeek.indexOf(start.getDayOfWeek())));
if (now.isAfter(firstDayOfMostRecentOccurringWeek.plusDays(RecurrenceConstants.DAYS_PER_WEEK))) {
numberOfOccurrences += sortedDaysOfWeek.size();
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.plusDays(maxDayOffset);
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
}
ZonedDateTime dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(minDayOffset);
if (start.isAfter(dayWithMinOffset)) {
numberOfOccurrences = 0;
dayWithMinOffset = start;
}
if (now.isBefore(dayWithMinOffset)) {
mostRecentOccurrence = firstDayOfMostRecentOccurringWeek.minusDays(interval * RecurrenceConstants.DAYS_PER_WEEK).plusDays(maxDayOffset);
} else {
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
for (int i = sortedDaysOfWeek.indexOf(dayWithMinOffset.getDayOfWeek()) + 1; i < sortedDaysOfWeek.size(); i++) {
dayWithMinOffset = firstDayOfMostRecentOccurringWeek.plusDays(
TimeWindowUtils.getPassedWeekDays(sortedDaysOfWeek.get(i), pattern.getFirstDayOfWeek()));
if (now.isBefore(dayWithMinOffset)) {
break;
}
mostRecentOccurrence = dayWithMinOffset;
numberOfOccurrences++;
}
}
return new OccurrenceInfo(mostRecentOccurrence, numberOfOccurrences);
} | class RecurrenceEvaluator {
/**
 * Checks whether {@code now} falls inside a recurring time window described by the settings.
 *
 * @return true when {@code now} is within a recurring time window, false otherwise.
 */
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
    // Reject invalid recurrence settings up front.
    if (!new RecurrenceValidator(settings).validateSettings()) {
        return false;
    }
    final ZonedDateTime occurrenceStart = getPreviousOccurrence(settings, now).previousOccurrence;
    if (occurrenceStart == null) {
        // No occurrence at or before "now" (not started yet, or outside the range).
        return false;
    }
    // Every occurrence lasts as long as the configured start-to-end window.
    final Duration windowLength = Duration.between(settings.getStart(), settings.getEnd());
    return now.isBefore(occurrenceStart.plus(windowLength));
}
/**
 * Finds the most recent occurrence at or before the provided timestamp.
 *
 * @return the closest previous occurrence; an empty OccurrenceInfo (null occurrence,
 * zero count) when the recurrence has not started or the range excludes it.
 */
private static OccurrenceInfo getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    final OccurrenceInfo none = new OccurrenceInfo();
    if (now.isBefore(settings.getStart())) {
        // The recurrence has not started yet.
        return none;
    }
    final OccurrenceInfo candidate =
        settings.getRecurrence().getPattern().getType() == RecurrencePatternType.DAILY
            ? getDailyPreviousOccurrence(settings, now)
            : getWeeklyPreviousOccurrence(settings, now);
    final RecurrenceRange range = settings.getRecurrence().getRange();
    // An "EndDate" range invalidates occurrences after the configured end date.
    if (range.getType() == RecurrenceRangeType.ENDDATE
        && candidate.previousOccurrence != null
        && candidate.previousOccurrence.isAfter(range.getEndDate())) {
        return none;
    }
    // A "Numbered" range caps how many occurrences may fire.
    if (range.getType() == RecurrenceRangeType.NUMBERED
        && candidate.numberOfOccurrences > range.getNumberOfRecurrences()) {
        return none;
    }
    return candidate;
}
/**
 * Finds the closest previous occurrence before {@code now} for the "Daily" pattern.
 *
 * @return an OccurrenceInfo holding the closest previous occurrence and the number
 * of occurrences (complete intervals since the start, counting the returned one).
 */
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    final ZonedDateTime start = settings.getStart();
    final int interval = settings.getRecurrence().getPattern().getInterval();
    // Whole recurrence intervals elapsed since the recurrence start.
    final long elapsedSeconds = Duration.between(start, now).getSeconds();
    final long intervalSeconds = Duration.ofDays(interval).getSeconds();
    final int completedIntervals = (int) (elapsedSeconds / intervalSeconds);
    return new OccurrenceInfo(start.plusDays((long) completedIntervals * interval), completedIntervals + 1);
}
/**
 * Result holder for a previous-occurrence lookup: the closest previous occurrence
 * (null when none exists) and how many occurrences have happened up to it.
 */
private static class OccurrenceInfo {
    private final ZonedDateTime previousOccurrence;
    private final int numberOfOccurrences;

    // "Empty" result: no previous occurrence.
    OccurrenceInfo() {
        this(null, 0);
    }

    OccurrenceInfo(ZonedDateTime occurrence, int count) {
        this.previousOccurrence = occurrence;
        this.numberOfOccurrences = count;
    }
}
} | class RecurrenceEvaluator {
/**
 * Checks if a provided timestamp is within any recurring time window specified
 * by the Recurrence section in the time window filter settings.
 *
 * @return true if the timestamp is within any recurring time window, false otherwise.
 */
public static boolean isMatch(TimeWindowFilterSettings settings, ZonedDateTime now) {
    // Throws when the recurrence settings are invalid.
    RecurrenceValidator.validateSettings(settings);
    final ZonedDateTime occurrenceStart = getPreviousOccurrence(settings, now);
    if (occurrenceStart == null) {
        // No valid occurrence at or before "now".
        return false;
    }
    // Every occurrence lasts as long as the configured start-to-end window.
    final Duration windowLength = Duration.between(settings.getStart(), settings.getEnd());
    return now.isBefore(occurrenceStart.plus(windowLength));
}
/**
 * Finds the most recent recurrence occurrence at or before the provided timestamp.
 *
 * @return the closest previous occurrence, or null when none is valid.
 */
private static ZonedDateTime getPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    if (now.isBefore(settings.getStart())) {
        // The recurrence has not started yet.
        return null;
    }
    final OccurrenceInfo candidate =
        settings.getRecurrence().getPattern().getType() == RecurrencePatternType.DAILY
            ? getDailyPreviousOccurrence(settings, now)
            : getWeeklyPreviousOccurrence(settings, now);
    final RecurrenceRange range = settings.getRecurrence().getRange();
    final RecurrenceRangeType rangeType = range.getType();
    // An "EndDate" range invalidates occurrences after the configured end date.
    if (rangeType == RecurrenceRangeType.ENDDATE
        && candidate.previousOccurrence != null
        && candidate.previousOccurrence.isAfter(range.getEndDate())) {
        return null;
    }
    // A "Numbered" range caps how many occurrences may fire.
    if (rangeType == RecurrenceRangeType.NUMBERED
        && candidate.numberOfOccurrences > range.getNumberOfOccurrences()) {
        return null;
    }
    return candidate.previousOccurrence;
}
/**
 * Finds the closest previous occurrence before {@code now} for the "Daily" pattern.
 *
 * @return an OccurrenceInfo holding the closest previous occurrence and the number
 * of occurrences (complete intervals since the start, counting the returned one).
 */
private static OccurrenceInfo getDailyPreviousOccurrence(TimeWindowFilterSettings settings, ZonedDateTime now) {
    final ZonedDateTime start = settings.getStart();
    final int interval = settings.getRecurrence().getPattern().getInterval();
    // Whole recurrence intervals elapsed since the recurrence start.
    final long elapsedSeconds = Duration.between(start, now).getSeconds();
    final long intervalSeconds = Duration.ofDays(interval).getSeconds();
    final int completedIntervals = (int) (elapsedSeconds / intervalSeconds);
    return new OccurrenceInfo(start.plusDays((long) completedIntervals * interval), completedIntervals + 1);
}
/**
 * Result holder for a previous-occurrence lookup: the closest previous occurrence
 * and how many occurrences have happened up to it.
 */
private static class OccurrenceInfo {
    private final ZonedDateTime previousOccurrence;
    private final int numberOfOccurrences;

    OccurrenceInfo(ZonedDateTime occurrence, int count) {
        this.previousOccurrence = occurrence;
        this.numberOfOccurrences = count;
    }
}
} |
I just was looking this over, `null` here is actually correct as it's also used in the normal time window filter code where not text/empty string is valid. | public static ZonedDateTime convertStringToDate(String timeStr) {
if (!StringUtils.hasText(timeStr)) {
return null;
}
try {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.ISO_DATE_TIME);
} catch (final DateTimeParseException e) {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.RFC_1123_DATE_TIME);
}
} | return null; | public static ZonedDateTime convertStringToDate(String timeStr) {
if (!StringUtils.hasText(timeStr)) {
return null;
}
try {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.ISO_DATE_TIME);
} catch (final DateTimeParseException e) {
return ZonedDateTime.parse(timeStr, DateTimeFormatter.RFC_1123_DATE_TIME);
}
} | class TimeWindowUtils {
/** Returns how many days have passed from the configured first day of the week to today. */
public static int passingDaysOfWeek(DayOfWeek today, String firstDayOfWeek) {
    final int offset = convertToWeekDayNumber(today) - convertToWeekDayNumber(firstDayOfWeek);
    // Wrap negative offsets around the week boundary.
    return offset < 0 ? offset + RecurrenceConstants.DAYS_PER_WEEK : offset;
}
/**
 * Maps a day-of-week name (case-insensitive) to a Sunday-based index:
 * "Sunday" -> 0, "Monday" -> 1, ..., "Saturday" -> 6.
 *
 * @param str the English day-of-week name, in any casing.
 * @return the Sunday-based day index (0-6).
 * @throws IllegalArgumentException when the name is not a valid day of week.
 */
public static int convertToWeekDayNumber(String str) {
    // Locale.ROOT keeps the uppercasing locale-independent; the default-locale
    // toUpperCase() breaks DayOfWeek.valueOf under e.g. the Turkish locale
    // ("friday" -> "FRİDAY" with a dotted capital I).
    final String strUpperCase = str.toUpperCase(java.util.Locale.ROOT);
    return DayOfWeek.valueOf(strUpperCase).getValue() % 7;
}
/** Maps a DayOfWeek to a Sunday-based index: SUNDAY -> 0, MONDAY -> 1, ..., SATURDAY -> 6. */
public static int convertToWeekDayNumber(DayOfWeek dateTime) {
    // getValue() is 1 (Monday) .. 7 (Sunday); the modulo folds Sunday onto 0.
    return Math.floorMod(dateTime.getValue(), 7);
}
/**
 * Converts day names to DayOfWeek values sorted by their distance from the
 * configured first day of the week.
 */
public static List<DayOfWeek> sortDaysOfWeek(List<String> daysOfWeek, String firstDayOfWeek) {
    final int firstDayNum = TimeWindowUtils.convertToWeekDayNumber(firstDayOfWeek);
    final List<DayOfWeek> result = daysOfWeek.stream()
        .map(name -> DayOfWeek.valueOf(name.toUpperCase()))
        .collect(Collectors.toList());
    // Sort by distance from the first day of the week, wrapping across the week boundary.
    Collections.sort(result, (left, right) -> {
        final int leftIndex = (TimeWindowUtils.convertToWeekDayNumber(left) - firstDayNum + RecurrenceConstants.DAYS_PER_WEEK) % 7;
        final int rightIndex = (TimeWindowUtils.convertToWeekDayNumber(right) - firstDayNum + RecurrenceConstants.DAYS_PER_WEEK) % 7;
        return Integer.compare(leftIndex, rightIndex);
    });
    return result;
}
} | class TimeWindowUtils {
/**
 * Calculates the offset in days between two given days of the week.
 * @param today DayOfWeek enum of today
 * @param firstDayOfWeek the start day of the week
 * @return the number of days passed since the week started
 * */
public static int getPassedWeekDays(DayOfWeek today, DayOfWeek firstDayOfWeek) {
    final int rawOffset = today.getValue() - firstDayOfWeek.getValue();
    // Add a full week before the modulo so negative offsets wrap correctly.
    return (rawOffset + RecurrenceConstants.DAYS_PER_WEEK) % 7;
}
/** Returns a copy of the given days sorted by their distance from the first day of the week. */
public static List<DayOfWeek> sortDaysOfWeek(List<DayOfWeek> daysOfWeek, DayOfWeek firstDayOfWeek) {
    final List<DayOfWeek> sorted = new ArrayList<>(daysOfWeek);
    sorted.sort(Comparator.comparingInt(day -> getPassedWeekDays(day, firstDayOfWeek)));
    return sorted;
}
} |
no synchronization needed - it's not the end of the world if we log it twice per process lifetime because of a race-condition. | private static void writeSlf4jDisabledError(ClientLogger.LogLevel level, String message, Throwable throwable) {
if (!slf4jErrorLogged) {
slf4jErrorLogged = true;
DEFAULT_LOGGER.log(level, String.format("[DefaultLogger]: %s. SLF4J logging will be disabled.", message),
throwable);
}
} | slf4jErrorLogged = true; | private static void writeSlf4jDisabledError(ClientLogger.LogLevel level, String message, Throwable throwable) {
if (!slf4jErrorLogged) {
slf4jErrorLogged = true;
DEFAULT_LOGGER.log(level, String.format("[DefaultLogger]: %s. SLF4J logging will be disabled.", message),
throwable);
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private static boolean slf4jErrorLogged = false;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
// One-time reflective binding of the SLF4J factory and Logger methods.
// Any failure leaves every handle null, permanently falling back to the default logger.
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
// Load SLF4J through this class' own class loader so shaded deployments resolve consistently.
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getLoggerMethodHandle = lookup.unreflect(loggerFactoryClass.getMethod("getLogger", String.class));
// The VERBOSE level maps onto SLF4J's "debug" methods.
logVerboseMethodHandle = lookup.unreflect(loggerClass.getMethod("debug", String.class, Throwable.class));
logInfoMethodHandle = lookup.unreflect(loggerClass.getMethod("info", String.class, Throwable.class));
logWarnMethodHandle = lookup.unreflect(loggerClass.getMethod("warn", String.class, Throwable.class));
logErrorMethodHandle = lookup.unreflect(loggerClass.getMethod("error", String.class, Throwable.class));
// Note: the is*Enabled methods take no arguments.
isVerboseEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isDebugEnabled"));
isInfoEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isInfoEnabled"));
isWarnEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isWarnEnabled"));
isErrorEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isErrorEnabled"));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
// SLF4J missing or incompatible: report once at VERBOSE and null everything out.
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
/** Creates a shim with no SLF4J logger name, logging only through the given default logger. */
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
/** Creates a shim for the given logger name, backed by a DefaultLogger of the same name. */
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
/** Creates a shim named after the given class, backed by a DefaultLogger for that class. */
public Slf4jLoggerShim(Class<?> clazz) {
this(clazz.getName(), new DefaultLogger(clazz));
}
/**
 * Creates the shim: resolves the SLF4J logger (when available) and caches which
 * levels are enabled across both SLF4J and the default logger.
 */
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
    this.slf4jLogger = createLogger(className);
    this.defaultLogger = defaultLogger;
    try {
        if (slf4jLogger != null) {
            // The SLF4J is*Enabled methods take no arguments; the previous code passed
            // the log level as an extra argument, which made MethodHandle.invoke throw
            // WrongMethodTypeException and silently disabled SLF4J for every instance.
            isVerboseEnabled |= (Boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(slf4jLogger);
            isInfoEnabled |= (Boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(slf4jLogger);
            isWarnEnabled |= (Boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(slf4jLogger);
            isErrorEnabled |= (Boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(slf4jLogger);
        }
    } catch (Throwable e) {
        writeSlf4jDisabledError(VERBOSE, "Failed to check if SLF4J log level is enabled", e);
        slf4jLogger = null;
    }
    // A level is loggable when either sink has it enabled.
    isVerboseEnabled |= defaultLogger.isEnabled(VERBOSE);
    isInfoEnabled |= defaultLogger.isEnabled(INFORMATIONAL);
    isWarnEnabled |= defaultLogger.isEnabled(WARNING);
    isErrorEnabled |= defaultLogger.isEnabled(ERROR);
}
/** Returns whether the given level would actually be logged by either sink. */
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
    if (logLevel == null) {
        return false;
    }
    if (logLevel == VERBOSE) {
        return isVerboseEnabled;
    }
    if (logLevel == INFORMATIONAL) {
        return isInfoEnabled;
    }
    if (logLevel == WARNING) {
        return isWarnEnabled;
    }
    if (logLevel == ERROR) {
        return isErrorEnabled;
    }
    // Unknown/NOT_SET levels are never logged.
    return false;
}
/**
 * Writes the message to the default logger and, when available, to SLF4J.
 * Any SLF4J failure disables SLF4J for this shim and is reported once.
 */
public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
    if (!canLogAtLevel(logLevel)) {
        return;
    }
    // The throwable is intentionally not passed to the default logger here.
    defaultLogger.log(logLevel, message, null);
    // Read the field once so a concurrent disable cannot null it mid-use.
    final Object slf4jLoggerCopy = this.slf4jLogger;
    if (slf4jLoggerCopy == null) {
        return;
    }
    // Pick the SLF4J log method matching the requested level.
    MethodHandle logHandle = null;
    switch (logLevel) {
        case VERBOSE:
            logHandle = LOGGER_VERBOSE_METHOD_HANDLE;
            break;
        case INFORMATIONAL:
            logHandle = LOGGER_INFO_METHOD_HANDLE;
            break;
        case WARNING:
            logHandle = LOGGER_WARN_METHOD_HANDLE;
            break;
        case ERROR:
            logHandle = LOGGER_ERROR_METHOD_HANDLE;
            break;
        default:
            break;
    }
    if (logHandle == null) {
        return;
    }
    try {
        logHandle.invoke(slf4jLoggerCopy, message, throwable);
    } catch (Throwable e) {
        writeSlf4jDisabledError(VERBOSE, "Failed to log message with SLF4J", e);
        slf4jLogger = null;
    }
}
/**
 * Resolves the SLF4J logger for the given name, or null when SLF4J is
 * unavailable, resolves to a NOPLogger, or fails to instantiate.
 */
private static Object createLogger(String className) {
    final boolean slf4jUnavailable =
        LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null;
    if (slf4jUnavailable) {
        return null;
    }
    try {
        final Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
        // A NOPLogger means SLF4J has no backing implementation; treat it as absent.
        if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
            writeSlf4jDisabledError(VERBOSE, "Resolved NOPLogger", null);
            return null;
        }
        return logger;
    } catch (Throwable e) {
        writeSlf4jDisabledError(WARNING, "Failed to create SLF4J logger", e);
        return null;
    }
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private static boolean slf4jErrorLogged = false;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
// One-time reflective binding of the SLF4J factory and Logger methods.
// Any failure leaves every handle null, permanently falling back to the default logger.
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
// Load SLF4J through this class' own class loader so shaded deployments resolve consistently.
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getLoggerMethodHandle = lookup.unreflect(loggerFactoryClass.getMethod("getLogger", String.class));
// The VERBOSE level maps onto SLF4J's "debug" methods.
logVerboseMethodHandle = lookup.unreflect(loggerClass.getMethod("debug", String.class, Throwable.class));
logInfoMethodHandle = lookup.unreflect(loggerClass.getMethod("info", String.class, Throwable.class));
logWarnMethodHandle = lookup.unreflect(loggerClass.getMethod("warn", String.class, Throwable.class));
logErrorMethodHandle = lookup.unreflect(loggerClass.getMethod("error", String.class, Throwable.class));
// Note: the is*Enabled methods take no arguments.
isVerboseEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isDebugEnabled"));
isInfoEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isInfoEnabled"));
isWarnEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isWarnEnabled"));
isErrorEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isErrorEnabled"));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
// SLF4J missing or incompatible: report once at VERBOSE and null everything out.
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
/** Creates a shim with no SLF4J logger name, logging only through the given default logger. */
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
/** Creates a shim for the given logger name, backed by a DefaultLogger of the same name. */
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
/** Creates a shim named after the given class, backed by a DefaultLogger for that class. */
public Slf4jLoggerShim(Class<?> clazz) {
this(clazz.getName(), new DefaultLogger(clazz));
}
/**
 * Creates the shim: resolves the SLF4J logger (when available) and caches which
 * levels are enabled across both SLF4J and the default logger.
 */
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
    this.slf4jLogger = createLogger(className);
    this.defaultLogger = defaultLogger;
    try {
        if (slf4jLogger != null) {
            // The SLF4J is*Enabled methods take no arguments; the previous code passed
            // the log level as an extra argument, which made MethodHandle.invoke throw
            // WrongMethodTypeException and silently disabled SLF4J for every instance.
            isVerboseEnabled |= (Boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(slf4jLogger);
            isInfoEnabled |= (Boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(slf4jLogger);
            isWarnEnabled |= (Boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(slf4jLogger);
            isErrorEnabled |= (Boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(slf4jLogger);
        }
    } catch (Throwable e) {
        writeSlf4jDisabledError(VERBOSE, "Failed to check if SLF4J log level is enabled", e);
        slf4jLogger = null;
    }
    // A level is loggable when either sink has it enabled.
    isVerboseEnabled |= defaultLogger.isEnabled(VERBOSE);
    isInfoEnabled |= defaultLogger.isEnabled(INFORMATIONAL);
    isWarnEnabled |= defaultLogger.isEnabled(WARNING);
    isErrorEnabled |= defaultLogger.isEnabled(ERROR);
}
/** Returns whether the given level would actually be logged by either sink. */
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
    if (logLevel == null) {
        return false;
    }
    if (logLevel == VERBOSE) {
        return isVerboseEnabled;
    }
    if (logLevel == INFORMATIONAL) {
        return isInfoEnabled;
    }
    if (logLevel == WARNING) {
        return isWarnEnabled;
    }
    if (logLevel == ERROR) {
        return isErrorEnabled;
    }
    // Unknown/NOT_SET levels are never logged.
    return false;
}
public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
defaultLogger.log(logLevel, message, null);
Object slf4jLoggerCopy = this.slf4jLogger;
if (slf4jLoggerCopy == null) {
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to log message with SLF4J", e);
slf4jLogger = null;
}
}
private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
writeSlf4jDisabledError(VERBOSE, "Resolved NOPLogger", null);
return null;
}
return logger;
} catch (Throwable e) {
writeSlf4jDisabledError(WARNING, "Failed to create SLF4J logger", e);
return null;
}
}
} |
Before doing all this computation, should we first check if logging at `info` level is enabled? | static void validateNettyVersions(Consumer<String> logger) {
Map<String, String> pomVersions = CoreUtils.getProperties(PROPERTIES_FILE_NAME);
String nettyVersion = pomVersions.get(NETTY_VERSION_PROPERTY);
String nettyTcnativeVersion = pomVersions.get(NETTY_TCNATIVE_VERSION_PROPERTY);
Map<String, Version> nettyVersions = Version.identify();
List<String> versionInformation = new ArrayList<>(11);
Set<String> nonNativeNettyVersions = new HashSet<>();
for (String artifact : REQUIRED_NETTY_VERSION_ARTIFACTS) {
Version version = nettyVersions.get(artifact);
if (version == null) {
versionInformation.add("'io.netty:" + artifact + "' (not found and is required)");
nonNativeNettyVersions.add("unknown");
} else {
versionInformation.add("'io.netty:" + artifact + "' version: " + version.artifactVersion());
nonNativeNettyVersions.add(version.artifactVersion());
}
}
for (String artifact : OPTIONAL_NETTY_VERSION_ARTIFACTS) {
Version version = nettyVersions.get(artifact);
if (version != null) {
versionInformation.add("'io.netty:" + artifact + "' version: " + version.artifactVersion());
nonNativeNettyVersions.add(version.artifactVersion());
}
}
for (String artifact : NETTY_TCNATIVE_VERSION_ARTIFACTS) {
Version version = nettyVersions.get(artifact);
if (version != null) {
versionInformation.add("'io.netty:" + artifact + "' version: " + version.artifactVersion());
}
}
String versionInformationString = CoreUtils.stringJoin(", ", versionInformation);
StringBuilder stringBuilder
= new StringBuilder().append("The following is Netty version information that was found on the classpath: ")
.append(versionInformationString)
.append(". ");
if (nonNativeNettyVersions.size() > 1) {
stringBuilder.append(NETTY_VERSION_MISMATCH_LOG);
}
stringBuilder.append("The version of azure-core-http-netty being used was built with Netty version ")
.append(nettyVersion)
.append(" and Netty Tcnative version ")
.append(nettyTcnativeVersion)
.append(". If your application runs without issue this message can be ignored, otherwise please align the "
+ "Netty versions used in your application. For more information, see "
+ "https:
logger.accept(stringBuilder.toString());
} | versionInformation.add("'io.netty:" + artifact + "' (not found and is required)"); | static void validateNettyVersions(Consumer<String> logger) {
Map<String, String> pomVersions = CoreUtils.getProperties(PROPERTIES_FILE_NAME);
String nettyVersion = pomVersions.get(NETTY_VERSION_PROPERTY);
String nettyTcnativeVersion = pomVersions.get(NETTY_TCNATIVE_VERSION_PROPERTY);
Map<String, Version> nettyVersions = Version.identify();
List<String> versionInformation = new ArrayList<>(11);
Set<String> nonNativeNettyVersions = new HashSet<>();
for (String artifact : REQUIRED_NETTY_VERSION_ARTIFACTS) {
Version version = nettyVersions.get(artifact);
if (version == null) {
versionInformation.add("'io.netty:" + artifact + "' (not found and is required)");
nonNativeNettyVersions.add("unknown");
} else {
versionInformation.add("'io.netty:" + artifact + "' version: " + version.artifactVersion());
nonNativeNettyVersions.add(version.artifactVersion());
}
}
for (String artifact : OPTIONAL_NETTY_VERSION_ARTIFACTS) {
Version version = nettyVersions.get(artifact);
if (version != null) {
versionInformation.add("'io.netty:" + artifact + "' version: " + version.artifactVersion());
nonNativeNettyVersions.add(version.artifactVersion());
}
}
for (String artifact : NETTY_TCNATIVE_VERSION_ARTIFACTS) {
Version version = nettyVersions.get(artifact);
if (version != null) {
versionInformation.add("'io.netty:" + artifact + "' version: " + version.artifactVersion());
}
}
String versionInformationString = CoreUtils.stringJoin(", ", versionInformation);
StringBuilder stringBuilder
= new StringBuilder().append("The following is Netty version information that was found on the classpath: ")
.append(versionInformationString)
.append(". ");
if (nonNativeNettyVersions.size() > 1) {
stringBuilder.append(NETTY_VERSION_MISMATCH_LOG);
}
stringBuilder.append("The version of azure-core-http-netty being used was built with Netty version ")
.append(nettyVersion)
.append(" and Netty Tcnative version ")
.append(nettyTcnativeVersion)
.append(". If your application runs without issue this message can be ignored, otherwise please align the "
+ "Netty versions used in your application. For more information, see "
+ "https:
logger.accept(stringBuilder.toString());
} | class allows for a finer grain handling.
if (reactorNettyConnection instanceof ChannelOperations) {
ChannelOperations<?, ?> channelOperations = (ChannelOperations<?, ?>) reactorNettyConnection;
if (!channelOperations.isInboundDisposed()) {
channelOperations.channel().eventLoop().execute(channelOperations::discard);
}
} | class allows for a finer grain handling.
if (reactorNettyConnection instanceof ChannelOperations) {
ChannelOperations<?, ?> channelOperations = (ChannelOperations<?, ?>) reactorNettyConnection;
if (!channelOperations.isInboundDisposed()) {
channelOperations.channel().eventLoop().execute(channelOperations::discard);
}
} |
should double be logged with %d? | public String toString() {
return String.format(
"FaultInjectionServerErrorResult{ serverErrorType=%s, times=%s, delay=%s, injectionRate=%s }",
this.serverErrorType,
this.times,
this.delay,
this.injectionRate);
} | "FaultInjectionServerErrorResult{ serverErrorType=%s, times=%s, delay=%s, injectionRate=%s }", | public String toString() {
return String.format(
"FaultInjectionServerErrorResult{ serverErrorType=%s, times=%s, delay=%s, injectionRate=%.2f%% }",
this.serverErrorType,
this.times,
this.delay,
this.injectionRate * 100);
} | class FaultInjectionServerErrorResult implements IFaultInjectionResult {
private final FaultInjectionServerErrorType serverErrorType;
private final Integer times;
private final Duration delay;
private final Boolean suppressServiceRequests;
private final double injectionRate;
FaultInjectionServerErrorResult(
FaultInjectionServerErrorType serverErrorTypes,
Integer times,
Duration delay,
Boolean suppressServiceRequests,
double injectionRate) {
this.serverErrorType = serverErrorTypes;
this.times = times;
this.delay = delay;
this.suppressServiceRequests = suppressServiceRequests;
this.injectionRate = injectionRate;
}
/***
* Get the fault injection server error type.
*
* @return the {@link FaultInjectionServerErrorType}.
*/
public FaultInjectionServerErrorType getServerErrorType() {
return serverErrorType;
}
/***
* Get the number of how many times the rule can be applied on a single operation.
*
* @return the times.
*/
public Integer getTimes() {
return times;
}
/***
* Get the injected delay for the server error.
* Will be required for SERVER_RESPONSE_DELAY and SERVER_CONNECTION_DELAY, will be ignored for others.
*
* @return the injected delay.
*/
public Duration getDelay() {
return delay;
}
/***
* Get a flag indicating whether service requests should be suppressed. If not specified (null) the default
* behavior is applied - only sending the request to the service when the delay is lower
* than the network request timeout.
* @return a flag indicating whether service requests should be suppressed.
*/
public Boolean getSuppressServiceRequests() {
return this.suppressServiceRequests;
}
/***
* Get A double between (0,1] representing the percent of times that the rule will be applied.
* Default value is 1.0 or 100%
* @return the apply percentage.
*/
public double getInjectionRate() { return this.injectionRate; }
@Override
} | class FaultInjectionServerErrorResult implements IFaultInjectionResult {
private final FaultInjectionServerErrorType serverErrorType;
private final Integer times;
private final Duration delay;
private final Boolean suppressServiceRequests;
private final double injectionRate;
FaultInjectionServerErrorResult(
FaultInjectionServerErrorType serverErrorTypes,
Integer times,
Duration delay,
Boolean suppressServiceRequests,
double injectionRate) {
this.serverErrorType = serverErrorTypes;
this.times = times;
this.delay = delay;
this.suppressServiceRequests = suppressServiceRequests;
this.injectionRate = injectionRate;
}
/***
* Get the fault injection server error type.
*
* @return the {@link FaultInjectionServerErrorType}.
*/
public FaultInjectionServerErrorType getServerErrorType() {
return serverErrorType;
}
/***
* Get the number of how many times the rule can be applied on a single operation.
*
* @return the times.
*/
public Integer getTimes() {
return times;
}
/***
* Get the injected delay for the server error.
* Will be required for SERVER_RESPONSE_DELAY and SERVER_CONNECTION_DELAY, will be ignored for others.
*
* @return the injected delay.
*/
public Duration getDelay() {
return delay;
}
/***
* Get a flag indicating whether service requests should be suppressed. If not specified (null) the default
* behavior is applied - only sending the request to the service when the delay is lower
* than the network request timeout.
* @return a flag indicating whether service requests should be suppressed.
*/
public Boolean getSuppressServiceRequests() {
return this.suppressServiceRequests;
}
/***
* Get A double between (0,1] representing the percent of times that the rule will be applied.
* Default value is 1.0 or 100%
* @return the apply percentage.
*/
public double getInjectionRate() {
return this.injectionRate;
}
@Override
} |
Change to injection rate. | public boolean isApplicable(FaultInjectionRequestArgs requestArgs) {
if (!this.isValid()) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
String.format(
"%s[Disable or Duration reached. StartTime: %s, ExpireTime: %s]",
this.id,
this.startTime,
this.expireTime)
);
return false;
}
if (!this.condition.isApplicable(this.id, requestArgs)) {
return false;
}
if (!this.result.isApplicable(this.id, requestArgs.getServiceRequest())) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
this.id + "[Per operation apply limit reached]"
);
return false;
}
if (this.result.getServerErrorType() == FaultInjectionServerErrorType.STALED_ADDRESSES_SERVER_GONE
&& requestArgs.getServiceRequest().faultInjectionRequestContext.getAddressForceRefreshed()) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
"Address force refresh happened, STALED_ADDRESSES error is cleared."
);
return false;
}
long evaluationCount = this.evaluationCount.incrementAndGet();
boolean withinHitLimit = this.hitLimit == null || evaluationCount <= this.hitLimit;
if (!withinHitLimit) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
String.format("%s [Hit Limit reached. Configured hitLimit %d, evaluationCount %d]", this.id, this.hitLimit, evaluationCount)
);
return false;
} else if (random.nextDouble() > this.result.getInjectionRate()) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
String.format("%s Apply percentage: Rule will not be applied. Configured applyPercentage %f%%", this.id, this.result.getInjectionRate() * 100)
);
return false;
} else {
this.hitCount.incrementAndGet();
String name =
requestArgs.getServiceRequest().getOperationType().toString() + "-" + requestArgs.getServiceRequest().getResourceType().toString();
this.hitCountDetails.compute(name, (key, count) -> {
if (count == null) {
count = 0L;
}
count++;
return count;
});
return true;
}
} | String.format("%s Apply percentage: Rule will not be applied. Configured applyPercentage %f%%", this.id, this.result.getInjectionRate() * 100) | public boolean isApplicable(FaultInjectionRequestArgs requestArgs) {
if (!this.isValid()) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
String.format(
"%s[Disable or Duration reached. StartTime: %s, ExpireTime: %s]",
this.id,
this.startTime,
this.expireTime)
);
return false;
}
if (!this.condition.isApplicable(this.id, requestArgs)) {
return false;
}
if (!this.result.isApplicable(this.id, requestArgs.getServiceRequest())) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
this.id + "[Per operation apply limit reached]"
);
return false;
}
if (this.result.getServerErrorType() == FaultInjectionServerErrorType.STALED_ADDRESSES_SERVER_GONE
&& requestArgs.getServiceRequest().faultInjectionRequestContext.getAddressForceRefreshed()) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
"Address force refresh happened, STALED_ADDRESSES error is cleared."
);
return false;
}
long evaluationCount = this.evaluationCount.incrementAndGet();
boolean withinHitLimit = this.hitLimit == null || evaluationCount <= this.hitLimit;
if (!withinHitLimit) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
String.format("%s [Hit Limit reached. Configured hitLimit %d, evaluationCount %d]", this.id, this.hitLimit, evaluationCount)
);
return false;
} else if (random.nextDouble() > this.result.getInjectionRate()) {
requestArgs.getServiceRequest().faultInjectionRequestContext.recordFaultInjectionRuleEvaluation(
requestArgs.getTransportRequestId(),
String.format("%s Injection Rate: Rule will not be applied. Configured injectionRate %.2f%%", this.id, this.result.getInjectionRate() * 100)
);
return false;
} else {
this.hitCount.incrementAndGet();
String name =
requestArgs.getServiceRequest().getOperationType().toString() + "-" + requestArgs.getServiceRequest().getResourceType().toString();
this.hitCountDetails.compute(name, (key, count) -> {
if (count == null) {
count = 0L;
}
count++;
return count;
});
return true;
}
} | class FaultInjectionServerErrorRule implements IFaultInjectionRuleInternal {
private final String id;
private final Instant startTime;
private final Instant expireTime;
private final Integer hitLimit;
private final AtomicLong hitCount;
private final Map<String, Long> hitCountDetails;
private final AtomicLong evaluationCount;
private final FaultInjectionConnectionType connectionType;
private final FaultInjectionConditionInternal condition;
private final FaultInjectionServerErrorResultInternal result;
private static final Random random = new Random();
private boolean enabled;
public FaultInjectionServerErrorRule(
String id,
boolean enabled,
Duration delay,
Duration duration,
Integer hitLimit,
FaultInjectionConnectionType connectionType,
FaultInjectionConditionInternal condition,
FaultInjectionServerErrorResultInternal result) {
checkArgument(StringUtils.isNotEmpty(id), "Argument 'id' cannot be null nor empty");
checkNotNull(condition, "Argument 'condition' can not be null");
checkNotNull(result, "Argument 'result' can not be null");
checkNotNull(connectionType, "Argument 'connectionType' can not be null");
this.id = id;
this.enabled = enabled;
this.hitLimit = hitLimit;
this.startTime = delay == null ? Instant.now() : Instant.now().plusMillis(delay.toMillis());
this.expireTime = duration == null ? Instant.MAX : this.startTime.plusMillis(duration.toMillis());
this.hitCount = new AtomicLong(0);
this.hitCountDetails = new ConcurrentHashMap<>();
this.evaluationCount = new AtomicLong(0);
this.condition = condition;
this.result = result;
this.connectionType = connectionType;
}
public CosmosException getInjectedServerError(RxDocumentServiceRequest request) {
return this.result.getInjectedServerError(request);
}
public String getId() {
return id;
}
@Override
public long getHitCount() {
return this.hitCount.get();
}
@Override
public Map<String, Long> getHitCountDetails() {
return this.hitCountDetails;
}
@Override
public FaultInjectionConnectionType getConnectionType() {
return this.connectionType;
}
public FaultInjectionConditionInternal getCondition() {
return condition;
}
public FaultInjectionServerErrorResultInternal getResult() {
return result;
}
@Override
public boolean isValid() {
Instant now = Instant.now();
return this.enabled
&& (now.equals(this.startTime) || now.isAfter(this.startTime))
&& (now.equals(this.expireTime) || now.isBefore(this.expireTime));
}
@Override
public void disable() {
this.enabled = false;
}
@Override
public List<URI> getAddresses() {
return this.condition.getAddresses();
}
@Override
public List<URI> getRegionEndpoints() {
return this.condition.getRegionEndpoints();
}
} | class FaultInjectionServerErrorRule implements IFaultInjectionRuleInternal {
private final String id;
private final Instant startTime;
private final Instant expireTime;
private final Integer hitLimit;
private final AtomicLong hitCount;
private final Map<String, Long> hitCountDetails;
private final AtomicLong evaluationCount;
private final FaultInjectionConnectionType connectionType;
private final FaultInjectionConditionInternal condition;
private final FaultInjectionServerErrorResultInternal result;
private static final Random random = new Random();
private boolean enabled;
public FaultInjectionServerErrorRule(
String id,
boolean enabled,
Duration delay,
Duration duration,
Integer hitLimit,
FaultInjectionConnectionType connectionType,
FaultInjectionConditionInternal condition,
FaultInjectionServerErrorResultInternal result) {
checkArgument(StringUtils.isNotEmpty(id), "Argument 'id' cannot be null nor empty");
checkNotNull(condition, "Argument 'condition' can not be null");
checkNotNull(result, "Argument 'result' can not be null");
checkNotNull(connectionType, "Argument 'connectionType' can not be null");
this.id = id;
this.enabled = enabled;
this.hitLimit = hitLimit;
this.startTime = delay == null ? Instant.now() : Instant.now().plusMillis(delay.toMillis());
this.expireTime = duration == null ? Instant.MAX : this.startTime.plusMillis(duration.toMillis());
this.hitCount = new AtomicLong(0);
this.hitCountDetails = new ConcurrentHashMap<>();
this.evaluationCount = new AtomicLong(0);
this.condition = condition;
this.result = result;
this.connectionType = connectionType;
}
public CosmosException getInjectedServerError(RxDocumentServiceRequest request) {
return this.result.getInjectedServerError(request);
}
public String getId() {
return id;
}
@Override
public long getHitCount() {
return this.hitCount.get();
}
@Override
public Map<String, Long> getHitCountDetails() {
return this.hitCountDetails;
}
@Override
public FaultInjectionConnectionType getConnectionType() {
return this.connectionType;
}
public FaultInjectionConditionInternal getCondition() {
return condition;
}
public FaultInjectionServerErrorResultInternal getResult() {
return result;
}
@Override
public boolean isValid() {
Instant now = Instant.now();
return this.enabled
&& (now.equals(this.startTime) || now.isAfter(this.startTime))
&& (now.equals(this.expireTime) || now.isBefore(this.expireTime));
}
@Override
public void disable() {
this.enabled = false;
}
@Override
public List<URI> getAddresses() {
return this.condition.getAddresses();
}
@Override
public List<URI> getRegionEndpoints() {
return this.condition.getRegionEndpoints();
}
} |
Good catch, should be `%f` | public String toString() {
return String.format(
"FaultInjectionServerErrorResult{ serverErrorType=%s, times=%s, delay=%s, injectionRate=%s }",
this.serverErrorType,
this.times,
this.delay,
this.injectionRate);
} | "FaultInjectionServerErrorResult{ serverErrorType=%s, times=%s, delay=%s, injectionRate=%s }", | public String toString() {
return String.format(
"FaultInjectionServerErrorResult{ serverErrorType=%s, times=%s, delay=%s, injectionRate=%.2f%% }",
this.serverErrorType,
this.times,
this.delay,
this.injectionRate * 100);
} | class FaultInjectionServerErrorResult implements IFaultInjectionResult {
private final FaultInjectionServerErrorType serverErrorType;
private final Integer times;
private final Duration delay;
private final Boolean suppressServiceRequests;
private final double injectionRate;
FaultInjectionServerErrorResult(
FaultInjectionServerErrorType serverErrorTypes,
Integer times,
Duration delay,
Boolean suppressServiceRequests,
double injectionRate) {
this.serverErrorType = serverErrorTypes;
this.times = times;
this.delay = delay;
this.suppressServiceRequests = suppressServiceRequests;
this.injectionRate = injectionRate;
}
/***
* Get the fault injection server error type.
*
* @return the {@link FaultInjectionServerErrorType}.
*/
public FaultInjectionServerErrorType getServerErrorType() {
return serverErrorType;
}
/***
* Get the number of how many times the rule can be applied on a single operation.
*
* @return the times.
*/
public Integer getTimes() {
return times;
}
/***
* Get the injected delay for the server error.
* Will be required for SERVER_RESPONSE_DELAY and SERVER_CONNECTION_DELAY, will be ignored for others.
*
* @return the injected delay.
*/
public Duration getDelay() {
return delay;
}
/***
* Get a flag indicating whether service requests should be suppressed. If not specified (null) the default
* behavior is applied - only sending the request to the service when the delay is lower
* than the network request timeout.
* @return a flag indicating whether service requests should be suppressed.
*/
public Boolean getSuppressServiceRequests() {
return this.suppressServiceRequests;
}
/***
* Get A double between (0,1] representing the percent of times that the rule will be applied.
* Default value is 1.0 or 100%
* @return the apply percentage.
*/
public double getInjectionRate() { return this.injectionRate; }
@Override
} | class FaultInjectionServerErrorResult implements IFaultInjectionResult {
private final FaultInjectionServerErrorType serverErrorType;
private final Integer times;
private final Duration delay;
private final Boolean suppressServiceRequests;
private final double injectionRate;
FaultInjectionServerErrorResult(
FaultInjectionServerErrorType serverErrorTypes,
Integer times,
Duration delay,
Boolean suppressServiceRequests,
double injectionRate) {
this.serverErrorType = serverErrorTypes;
this.times = times;
this.delay = delay;
this.suppressServiceRequests = suppressServiceRequests;
this.injectionRate = injectionRate;
}
/***
* Get the fault injection server error type.
*
* @return the {@link FaultInjectionServerErrorType}.
*/
public FaultInjectionServerErrorType getServerErrorType() {
return serverErrorType;
}
/***
* Get the number of how many times the rule can be applied on a single operation.
*
* @return the times.
*/
public Integer getTimes() {
return times;
}
/***
* Get the injected delay for the server error.
* Will be required for SERVER_RESPONSE_DELAY and SERVER_CONNECTION_DELAY, will be ignored for others.
*
* @return the injected delay.
*/
public Duration getDelay() {
return delay;
}
/***
* Get a flag indicating whether service requests should be suppressed. If not specified (null) the default
* behavior is applied - only sending the request to the service when the delay is lower
* than the network request timeout.
* @return a flag indicating whether service requests should be suppressed.
*/
public Boolean getSuppressServiceRequests() {
return this.suppressServiceRequests;
}
/***
* Get A double between (0,1] representing the percent of times that the rule will be applied.
* Default value is 1.0 or 100%
* @return the apply percentage.
*/
public double getInjectionRate() {
return this.injectionRate;
}
@Override
} |
https://github.com/Azure/azure-sdk-for-java/blob/f10cd0f9f1d9e45649bff02247c5afd774ff5f19/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/Constants.java#L84 | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
resource = "https:
}
}
}
return resource + "/.default";
} | resource = "https: | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
resource = removeTrailingSlash(resource);
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
 * Generates the default OAuth2 scope for the given request.
 *
 * @param request a http request
 * @param environment the azure environment with current request
 * @return the default scope (a resource URI suffixed with "/.default")
 */
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
// Lower-case the URL so endpoint matching inside getDefaultScopeFromUrl is case-insensitive.
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
 * Removes the trailing slash of the string.
 *
 * @param s the string, possibly null or empty
 * @return the string without its trailing slash, or the input unchanged
 */
private static String removeTrailingSlash(String s) {
    // Nothing to trim on null or empty input.
    if (s == null || s.isEmpty()) {
        return s;
    }
    // Strip at most one trailing '/' -- equivalent to the original endsWith check.
    return s.charAt(s.length() - 1) == '/' ? s.substring(0, s.length() - 1) : s;
}
/**
 * Get the Azure storage account connection string.
 *
 * @param accountName storage account name
 * @param accountKey storage account key
 * @param environment the Azure environment; falls back to the public Azure cloud when
 *     null or missing a storage endpoint suffix
 * @return the storage account connection string.
 */
public static String getStorageConnectionString(String accountName, String accountKey,
    AzureEnvironment environment) {
    // Fall back to the public Azure cloud when no usable environment is supplied.
    AzureEnvironment effective = (environment == null || environment.getStorageEndpointSuffix() == null)
        ? AzureEnvironment.AZURE
        : environment;
    // Drop any leading dots from the endpoint suffix (e.g. ".core.windows.net").
    String endpointSuffix = effective.getStorageEndpointSuffix().replaceAll("^\\.*", "");
    return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
        accountName, accountKey, endpointSuffix);
}
/**
 * The class provides the common methods required for SDK framework.
 *
 * RESERVED FOR INTERNAL USE.
 */
public static class InternalRuntimeContext {
// Per-instance factory for IdentifierProvider instances; defaults to ResourceNamer::new
// and can be swapped (e.g. for deterministic names in tests) via setIdentifierFunction.
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
// Process-wide delay provider shared by every context instance.
private static DelayProvider delayProvider = new ResourceDelayProvider();
// Process-wide Reactor scheduler. NOTE(review): writes to these statics are not
// synchronized -- presumably configured once at startup; confirm before concurrent use.
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
 * Sets the resource namer factory.
 *
 * @param identifierFunction the function producing an IdentifierProvider from a name.
 */
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
 * Creates a resource namer.
 *
 * @param name the name value.
 * @return the new resource namer
 */
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
 * Gets a random name.
 *
 * @param prefix the prefix to be used if possible
 * @param maxLen the maximum length for the random generated name
 * @return the random name
 */
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
 * Gets a random UUID.
 *
 * @return the random UUID.
 */
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
 * Function to override the DelayProvider (process-wide).
 *
 * @param delayProvider delayProvider to override.
 */
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
 * Wrapper for the duration for delay, based on delayProvider.
 *
 * @param delay the duration of proposed delay.
 * @return the duration of delay.
 */
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
 * Gets the current Rx Scheduler for the SDK framework.
 *
 * @return current rx scheduler.
 */
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
 * Sets the Rx Scheduler for SDK framework, by default is Schedulers.parallel().
 *
 * @param reactorScheduler current Rx Scheduler to be used in SDK framework.
 */
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} | class ResourceManagerUtils {
// Private constructor: static utility holder, never instantiated.
private ResourceManagerUtils() {
}
/**
 * Converts an object Boolean to a primitive boolean.
 *
 * @param value the Boolean value, possibly null
 * @return {@code true} only when the given value is non-null and true
 */
public static boolean toPrimitiveBoolean(Boolean value) {
    // Null-safe unboxing: a null wrapper is treated the same as Boolean.FALSE.
    return value != null && value.booleanValue();
}
/**
 * Converts an object Integer to a primitive int.
 *
 * @param value the Integer value, possibly null
 * @return 0 when the given value is null, its int value otherwise
 */
public static int toPrimitiveInt(Integer value) {
    // Null-safe unboxing: substitute zero for a missing value.
    return value == null ? 0 : value.intValue();
}
/**
 * Converts an object Long to a primitive int.
 *
 * @param value the Long value, possibly null
 * @return 0 when the given value is null, its int value otherwise
 * @throws ArithmeticException when the value does not fit in an int
 */
public static int toPrimitiveInt(Long value) {
    // Math.toIntExact preserves the original overflow-checked narrowing.
    return value == null ? 0 : Math.toIntExact(value.longValue());
}
/**
 * Converts an object Long to a primitive long.
 *
 * @param value the Long value, possibly null
 * @return 0 when the given value is null, its long value otherwise
 */
public static long toPrimitiveLong(Long value) {
    // Null-safe unboxing: substitute zero for a missing value.
    return value == null ? 0L : value.longValue();
}
/**
 * Wrapper for thread sleep.
 *
 * <p>The requested duration is routed through the configured
 * {@code InternalRuntimeContext} delay provider, which may shorten it (e.g. in tests).
 * If the sleeping thread is interrupted, this method returns early with the thread's
 * interrupt status restored.
 *
 * @param duration the duration value for which thread should put on sleep.
 */
public static void sleep(Duration duration) {
    try {
        Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
    } catch (InterruptedException e) {
        // Fix: previously the interrupt was silently swallowed. Re-assert the
        // interrupt status so callers can still observe and react to it.
        Thread.currentThread().interrupt();
    }
}
/**
 * Creates an Odata filter string that can be used for filtering list results by tags.
 *
 * @param tagName the name of the tag. If not provided, all resources will be returned.
 * @param tagValue the value of the tag. If not provided, only tag name will be filtered.
 * @return the Odata filter to pass into list methods
 */
public static String createOdataFilterForTags(String tagName, String tagValue) {
    // Without a tag name there is nothing to filter on.
    if (tagName == null) {
        return null;
    }
    // Filter on the name alone, or on the name/value pair when both are given.
    return tagValue == null
        ? String.format("tagname eq '%s'", tagName)
        : String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
/**
 * Gets the only subscription as the default one in the tenant if applicable.
 *
 * @param subscriptions the list of subscriptions
 * @throws IllegalStateException when no subscription or more than one subscription is found
 * @return the subscription ID of the only subscription existing in the tenant
 */
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
// Drain the paged response into a list so it can be sized and indexed.
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
// Fail fast when the tenant has no subscription at all.
// NOTE(review): the literal below ends at "https:" -- the rest of the URL (and the
// closing quote/parentheses) appears truncated by a tool that stripped "//" comments;
// restore the full link from the upstream source.
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
// Ambiguous: more than one subscription -- list them all so the user can pick one.
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
// Exactly one subscription: use it as the default.
return subscriptionList.get(0).subscriptionId();
}
/**
 * Generates the default OAuth2 scope for the given request.
 *
 * @param request a http request
 * @param environment the azure environment with current request
 * @return the default scope (a resource URI suffixed with "/.default")
 */
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
// Lower-case the URL so endpoint matching inside getDefaultScopeFromUrl is case-insensitive.
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
 * Removes the trailing slash of the string.
 *
 * @param s the string, possibly null or empty
 * @return the string without its trailing slash, or the input unchanged
 */
private static String removeTrailingSlash(String s) {
    // Nothing to trim on null or empty input.
    if (s == null || s.isEmpty()) {
        return s;
    }
    // Strip at most one trailing '/' -- equivalent to the original endsWith check.
    return s.charAt(s.length() - 1) == '/' ? s.substring(0, s.length() - 1) : s;
}
/**
 * Get the Azure storage account connection string.
 *
 * @param accountName storage account name
 * @param accountKey storage account key
 * @param environment the Azure environment; falls back to the public Azure cloud when
 *     null or missing a storage endpoint suffix
 * @return the storage account connection string.
 */
public static String getStorageConnectionString(String accountName, String accountKey,
    AzureEnvironment environment) {
    // Fall back to the public Azure cloud when no usable environment is supplied.
    AzureEnvironment effective = (environment == null || environment.getStorageEndpointSuffix() == null)
        ? AzureEnvironment.AZURE
        : environment;
    // Drop any leading dots from the endpoint suffix (e.g. ".core.windows.net").
    String endpointSuffix = effective.getStorageEndpointSuffix().replaceAll("^\\.*", "");
    return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
        accountName, accountKey, endpointSuffix);
}
/**
 * The class provides the common methods required for SDK framework.
 *
 * RESERVED FOR INTERNAL USE.
 */
public static class InternalRuntimeContext {
// Per-instance factory for IdentifierProvider instances; defaults to ResourceNamer::new
// and can be swapped (e.g. for deterministic names in tests) via setIdentifierFunction.
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
// Process-wide delay provider shared by every context instance.
private static DelayProvider delayProvider = new ResourceDelayProvider();
// Process-wide Reactor scheduler. NOTE(review): writes to these statics are not
// synchronized -- presumably configured once at startup; confirm before concurrent use.
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
 * Sets the resource namer factory.
 *
 * @param identifierFunction the function producing an IdentifierProvider from a name.
 */
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
 * Creates a resource namer.
 *
 * @param name the name value.
 * @return the new resource namer
 */
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
 * Gets a random name.
 *
 * @param prefix the prefix to be used if possible
 * @param maxLen the maximum length for the random generated name
 * @return the random name
 */
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
 * Gets a random UUID.
 *
 * @return the random UUID.
 */
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
 * Function to override the DelayProvider (process-wide).
 *
 * @param delayProvider delayProvider to override.
 */
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
 * Wrapper for the duration for delay, based on delayProvider.
 *
 * @param delay the duration of proposed delay.
 * @return the duration of delay.
 */
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
 * Gets the current Rx Scheduler for the SDK framework.
 *
 * @return current rx scheduler.
 */
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
 * Sets the Rx Scheduler for SDK framework, by default is Schedulers.parallel().
 *
 * @param reactorScheduler current Rx Scheduler to be used in SDK framework.
 */
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} |
Is it a default that could be overridden by user, or is it a constant that cannot be changed? | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
resource = "https:
}
}
}
return resource + "/.default";
} | resource = "https: | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
resource = removeTrailingSlash(resource);
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
 * Wrapper for thread sleep.
 *
 * <p>The requested duration is routed through the configured
 * {@code InternalRuntimeContext} delay provider, which may shorten it (e.g. in tests).
 * If the sleeping thread is interrupted, this method returns early with the thread's
 * interrupt status restored.
 *
 * @param duration the duration value for which thread should put on sleep.
 */
public static void sleep(Duration duration) {
    try {
        Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
    } catch (InterruptedException e) {
        // Fix: previously the interrupt was silently swallowed. Re-assert the
        // interrupt status so callers can still observe and react to it.
        Thread.currentThread().interrupt();
    }
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
 * Wrapper for thread sleep.
 *
 * <p>The requested duration is routed through the configured
 * {@code InternalRuntimeContext} delay provider, which may shorten it (e.g. in tests).
 * If the sleeping thread is interrupted, this method returns early with the thread's
 * interrupt status restored.
 *
 * @param duration the duration value for which thread should put on sleep.
 */
public static void sleep(Duration duration) {
    try {
        Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
    } catch (InterruptedException e) {
        // Fix: previously the interrupt was silently swallowed. Re-assert the
        // interrupt status so callers can still observe and react to it.
        Thread.currentThread().interrupt();
    }
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} |
Copied the logic from storage's PipelineBuilder: https://github.com/Azure/azure-sdk-for-java/blob/f10cd0f9f1d9e45649bff02247c5afd774ff5f19/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/implementation/util/BuilderHelper.java#L121-L126 Seems it can either be default, or the fqdn of the blob. Tested both and they all work. https://github.com/Azure/azure-sdk-for-java/blob/f10cd0f9f1d9e45649bff02247c5afd774ff5f19/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobAudience.java#L16-L20 https://github.com/Azure/azure-sdk-for-java/blob/f10cd0f9f1d9e45649bff02247c5afd774ff5f19/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/BlobAudience.java#L33-L42 Guess the default one is good? Though I wonder the difference between them.. | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
resource = "https:
}
}
}
return resource + "/.default";
} | resource = "https: | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
resource = removeTrailingSlash(resource);
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} |
Yeah, maybe fine. Major concern is that different Azure cloud could have different scope. But if this is from storage blob, guess it's fine. | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
resource = "https:
}
}
}
return resource + "/.default";
} | resource = "https: | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
resource = removeTrailingSlash(resource);
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} |
Got it. It's a valid concern. Looked up the docs, seems all clouds share the same default `https://storage.azure.com`. https://learn.microsoft.com/en-us/azure/storage/blobs/authorize-access-azure-active-directory#microsoft-authentication-library-msal The major difference is that for scope using fqdn, it's for specific storage account and service > Use this method to acquire a token for authorizing requests to that specific Azure Storage account and service only Guess it's more appropriate. | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
resource = "https:
}
}
}
return resource + "/.default";
} | resource = "https: | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
resource = removeTrailingSlash(resource);
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} |
Do we have a diff of one trailing `/` in this 2 pattern? | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | resource = "https: | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
resource = removeTrailingSlash(resource);
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} |
Also, may just paste the link you have (https://learn.microsoft.com/en-us/azure/storage/blobs/authorize-access-azure-active-directory#microsoft-authentication-library-msal), as comment to this code block. | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | resource = "https: | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
resource = removeTrailingSlash(resource);
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} |
updated | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | resource = "https: | static String getDefaultScopeFromUrl(String url, AzureEnvironment environment) {
String resource = environment.getManagementEndpoint();
for (Map.Entry<String, String> endpoint : environment.getEndpoints().entrySet()) {
if (url.contains(endpoint.getValue())) {
if (endpoint.getKey().equals(AzureEnvironment.Endpoint.KEYVAULT.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.GRAPH.identifier())) {
resource = environment.getGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MICROSOFT_GRAPH.identifier())) {
resource = environment.getMicrosoftGraphEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.LOG_ANALYTICS.identifier())) {
resource = environment.getLogAnalyticsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.APPLICATION_INSIGHTS.identifier())) {
resource = environment.getApplicationInsightsEndpoint();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_STORE.identifier())
|| endpoint.getKey().equals(AzureEnvironment.Endpoint.DATA_LAKE_ANALYTICS.identifier())) {
resource = environment.getDataLakeEndpointResourceId();
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.MANAGED_HSM.identifier())) {
resource = String.format("https:
resource = removeTrailingSlash(resource);
break;
} else if (endpoint.getKey().equals(AzureEnvironment.Endpoint.STORAGE.identifier())) {
try {
resource = String.format("https:
resource = removeTrailingSlash(resource);
} catch (MalformedURLException e) {
resource = "https:
}
break;
}
}
}
return resource + "/.default";
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} | class ResourceManagerUtils {
private ResourceManagerUtils() {
}
/**
* Converts an object Boolean to a primitive boolean.
*
* @param value the Boolean value
* @return false if the given Boolean value is null or false else true
*/
public static boolean toPrimitiveBoolean(Boolean value) {
if (value == null) {
return false;
}
return value.booleanValue();
}
/**
* Converts an object Integer to a primitive int.
*
* @param value the Integer value
* @return 0 if the given Integer value is null else integer value
*/
public static int toPrimitiveInt(Integer value) {
if (value == null) {
return 0;
}
return value.intValue();
}
/**
* Converts an object Long to a primitive int.
*
* @param value the Long value
* @return 0 if the given Long value is null else integer value
*/
public static int toPrimitiveInt(Long value) {
if (value == null) {
return 0;
}
return Math.toIntExact(value);
}
/**
* Converts an object Long to a primitive long.
*
* @param value the Long value
* @return 0 if the given Long value is null else long value
*/
public static long toPrimitiveLong(Long value) {
if (value == null) {
return 0;
}
return value;
}
/**
* Wrapper for thread sleep.
*
* @param duration the duration value for which thread should put on sleep.
*/
public static void sleep(Duration duration) {
try {
Thread.sleep(InternalRuntimeContext.getDelayDuration(duration).toMillis());
} catch (InterruptedException e) {
}
}
/**
* Creates an Odata filter string that can be used for filtering list results by tags.
*
* @param tagName the name of the tag. If not provided, all resources will be returned.
* @param tagValue the value of the tag. If not provided, only tag name will be filtered.
* @return the Odata filter to pass into list methods
*/
public static String createOdataFilterForTags(String tagName, String tagValue) {
if (tagName == null) {
return null;
} else if (tagValue == null) {
return String.format("tagname eq '%s'", tagName);
} else {
return String.format("tagname eq '%s' and tagvalue eq '%s'", tagName, tagValue);
}
}
/**
* Gets the only subscription as the default one in the tenant if applicable.
*
* @param subscriptions the list of subscriptions
* @throws IllegalStateException when no subscription or more than one subscription found
* @return the only subscription existing in the tenant
*/
public static String getDefaultSubscription(PagedIterable<Subscription> subscriptions) {
List<Subscription> subscriptionList = new ArrayList<>();
subscriptions.forEach(subscription -> {
subscriptionList.add(subscription);
});
if (subscriptionList.size() == 0) {
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptionList.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptionList.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw new ClientLogger(ResourceManagerUtils.class).logExceptionAsError(
new IllegalStateException(stringBuilder.toString()));
}
return subscriptionList.get(0).subscriptionId();
}
/**
* Generates default scope for oauth2 from the specific request
* @param request a http request
* @param environment the azure environment with current request
* @return the default scope
*/
public static String getDefaultScopeFromRequest(HttpRequest request, AzureEnvironment environment) {
return getDefaultScopeFromUrl(request.getUrl().toString().toLowerCase(Locale.ROOT), environment);
}
/**
* Generates default scope for oauth2 from the specific request
* @param url the url in lower case of a http request
* @param environment the azure environment with current request
* @return the default scope
*/
/**
* Removes the trailing slash of the string.
* @param s the string
* @return the string without trailing slash
*/
private static String removeTrailingSlash(String s) {
if (s == null || s.length() == 0) {
return s;
}
if (s.endsWith("/")) {
return s.substring(0, s.length() - 1);
}
return s;
}
/**
* Get the Azure storage account connection string.
* @param accountName storage account name
* @param accountKey storage account key
* @param environment the Azure environment
* @return the storage account connection string.
*/
public static String getStorageConnectionString(String accountName, String accountKey,
AzureEnvironment environment) {
if (environment == null || environment.getStorageEndpointSuffix() == null) {
environment = AzureEnvironment.AZURE;
}
String suffix = environment.getStorageEndpointSuffix().replaceAll("^\\.*", "");
return String.format("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=%s",
accountName, accountKey, suffix);
}
/**
* The class provides the common methods required for SDK framework.
*
* RESERVED FOR INTERNAL USE.
*/
public static class InternalRuntimeContext {
private Function<String, IdentifierProvider> identifierFunction = ResourceNamer::new;
private static DelayProvider delayProvider = new ResourceDelayProvider();
private static Scheduler reactorScheduler = Schedulers.parallel();
/**
* Sets the resource namer
*
* @param identifierFunction the function.
*/
public void setIdentifierFunction(Function<String, IdentifierProvider> identifierFunction) {
this.identifierFunction = identifierFunction;
}
/**
* Creates a resource namer
*
* @param name the name value.
* @return the new resource namer
*/
public IdentifierProvider createIdentifierProvider(String name) {
return identifierFunction.apply(name);
}
/**
* Gets a random name.
*
* @param prefix the prefix to be used if possible
* @param maxLen the maximum length for the random generated name
* @return the random name
*/
public String randomResourceName(String prefix, int maxLen) {
return identifierFunction.apply("").getRandomName(prefix, maxLen);
}
/**
* Gets a random UUID.
*
* @return the random UUID.
*/
public String randomUuid() {
return identifierFunction.apply("").getRandomUuid();
}
/**
* Function to override the DelayProvider.
*
* @param delayProvider delayProvider to override.
*/
public static void setDelayProvider(DelayProvider delayProvider) {
InternalRuntimeContext.delayProvider = delayProvider;
}
/**
* Wrapper for the duration for delay, based on delayProvider.
*
* @param delay the duration of proposed delay.
* @return the duration of delay.
*/
public static Duration getDelayDuration(Duration delay) {
return delayProvider.getDelayDuration(delay);
}
/**
* Gets the current Rx Scheduler for the SDK framework.
*
* @return current rx scheduler.
*/
public static Scheduler getReactorScheduler() {
return reactorScheduler;
}
/**
* Sets the Rx Scheduler for SDK framework, by default is Scheduler.io().
*
* @param reactorScheduler current Rx Scheduler to be used in SDK framework.
*/
public static void setReactorScheduler(Scheduler reactorScheduler) {
InternalRuntimeContext.reactorScheduler = reactorScheduler;
}
}
} |
It is recommended to use ConditionMessage.Builder to provide clearer message and include information similar to "spring.cloud.azure.servicebus.processor.subscription-name is missing." | public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
Environment environment = context.getEnvironment();
String entityType = environment.getProperty("spring.cloud.azure.servicebus.entity-type", "noType");
String processorEntityType = environment.getProperty("spring.cloud.azure.servicebus.processor.entity-type","noType");
String processorSubscriptionName = environment.getProperty("spring.cloud.azure.servicebus.processor.subscription-name", "noName");
if ("queue".equalsIgnoreCase(entityType) || "queue".equalsIgnoreCase(processorEntityType)) {
return ConditionOutcome.match();
}
if ("topic".equalsIgnoreCase(entityType) || "topic".equalsIgnoreCase(processorEntityType)) {
if (!"noName".equals(processorSubscriptionName)) {
return ConditionOutcome.match();
}
}
return ConditionOutcome.noMatch("Topic need to have subscription name set.");
} | return ConditionOutcome.noMatch("Topic need to have subscription name set."); | public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
String entityType = AzureServiceBusPropertiesUtils.getServiceBusProperties(context, "processor.entity-type", "entity-type");
String processorSubscriptionName = AzureServiceBusPropertiesUtils.getServiceBusProperties(context, "processor.subscription-name");
if ("queue".equalsIgnoreCase(entityType)) {
return ConditionOutcome.match();
}
if ("topic".equalsIgnoreCase(entityType)) {
if (processorSubscriptionName != null) {
return ConditionOutcome.match();
} else {
return ConditionOutcome.noMatch("spring.cloud.azure.servicebus.processor.subscription-name is missing.");
}
}
return ConditionOutcome.noMatch("Entity type should be queue/topic.");
} | class AzureServiceBusProcessorCondition extends SpringBootCondition {
@Override
} | class AzureServiceBusProcessorCondition extends SpringBootCondition {
@Override
} |
`new byte[0]` should be a constant. | private static Object deserialize(BinaryData value, Type type, ObjectSerializer serializer) throws IOException {
return serializer.deserializeFromBytes(value == null ? new byte[0] : value.toBytes(), type);
} | return serializer.deserializeFromBytes(value == null ? new byte[0] : value.toBytes(), type); | private static Object deserialize(BinaryData value, Type type, ObjectSerializer serializer) throws IOException {
return serializer.deserializeFromBytes(value == null ? EMPTY_BYTE_ARRAY : value.toBytes(), type);
} | class HttpResponseBodyDecoder {
private static final ClientLogger LOGGER = new ClientLogger(HttpResponseBodyDecoder.class);
/**
* Decodes the body of an {@link Response} into the type returned by the called API.
*
* <p>If the response body cannot be decoded, null will be returned.</p>
*
* @param body The response body retrieved from the {@link Response} to decode.
* @param response The {@link Response}.
* @param serializer The {@link ObjectSerializer} that performs the decoding.
* @param decodeData The API method metadata used during decoding of the {@link Response response}.
*
* @return The decoded {@link Response response} body, or {@code null} if the body could not be decoded.
*
* @throws HttpResponseException If the body cannot be decoded.
*/
public static Object decodeByteArray(BinaryData body, Response<?> response, ObjectSerializer serializer,
HttpResponseDecodeData decodeData) {
ensureRequestSet(response);
if (response.getRequest().getHttpMethod() == HttpMethod.HEAD) {
return null;
} else if (isErrorStatus(response.getStatusCode(), decodeData)) {
try {
return deserializeBody(body, decodeData.getUnexpectedException(
response.getStatusCode()).getExceptionBodyClass(), null, serializer);
} catch (IOException e) {
return LOGGER.atWarning().log("Failed to deserialize the error entity.", e);
} catch (RuntimeException e) {
Throwable cause = e.getCause();
if (cause instanceof InvocationTargetException || cause instanceof IllegalAccessException
|| cause instanceof NoSuchMethodException || cause instanceof IOException) {
LOGGER.atWarning().log("Failed to deserialize the error entity.", e);
return e;
} else {
throw e;
}
}
} else {
if (!decodeData.isReturnTypeDecodable()) {
return null;
}
try {
return deserializeBody(response.getBody(), extractEntityTypeFromReturnType(decodeData),
decodeData.getReturnValueWireType(), serializer);
} catch (MalformedValueException e) {
throw new HttpResponseException("HTTP response has a malformed body.", response, null, e);
} catch (IOException e) {
throw new HttpResponseException("Deserialization failed.", response, null, e);
}
}
}
/**
* @return The decoded type used to decode the response body, null if the body is not decodable.
*/
public static Type decodedType(final Response<?> response, final HttpResponseDecodeData decodeData) {
ensureRequestSet(response);
if (response.getRequest().getHttpMethod() == HttpMethod.HEAD) {
return null;
} else if (isErrorStatus(response.getStatusCode(), decodeData)) {
return decodeData.getUnexpectedException(response.getStatusCode()).getExceptionBodyClass();
} else {
return decodeData.isReturnTypeDecodable() ? extractEntityTypeFromReturnType(decodeData) : null;
}
}
/**
* Checks the {@link Response} status code is considered as error.
*
* @param statusCode The status code from the response.
* @param decodeData Metadata about the API response.
*
* @return {@code true} if the {@link Response} status code is considered as error, {@code false}
* otherwise.
*/
static boolean isErrorStatus(int statusCode, HttpResponseDecodeData decodeData) {
return !decodeData.isExpectedResponseStatusCode(statusCode);
}
/**
* Deserialize the given string value representing content of a REST API response.
*
* @param value The string value to deserialize.
* @param resultType The return type of the Java proxy method.
* @param wireType Value of the optional {@link HttpRequestInformation
* the Java proxy method indicating 'entity type' (wireType) of REST API wire response body.
*
* @return Deserialized object.
* @throws IOException If the deserialization fails.
*/
private static Object deserializeBody(BinaryData value, Type resultType, Type wireType, ObjectSerializer serializer)
throws IOException {
if (wireType == null) {
return deserialize(value, resultType, serializer);
} else {
Type wireResponseType = constructWireResponseType(resultType, wireType);
Object wireResponse = deserialize(value, wireResponseType, serializer);
return convertToResultType(wireResponse, resultType, wireType);
}
}
/**
* Given: (1). The {@link Type result type} of the Java proxy method return value and (2). The
* {@link HttpRequestInformation
* the same REST APIs wire response body, this method will construct the 'response body Type'.
*
* <p>Note: When the {@link HttpRequestInformation
* the raw HTTP response content will need to be parsed using the derived 'response body Type' and then converted to
* the actual {@code returnType}.</p>
*
* @param resultType The {@link Type} of java proxy method return value.
* @param wireType The {@link Type} of entity in REST API response body.
*
* @return The {@link Type} of REST API response body.
*/
private static Type constructWireResponseType(Type resultType, Type wireType) {
Objects.requireNonNull(wireType);
if (resultType == byte[].class) {
if (wireType == Base64Url.class) {
return Base64Url.class;
}
} else if (resultType == OffsetDateTime.class) {
if (wireType == DateTimeRfc1123.class) {
return DateTimeRfc1123.class;
}
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, List.class)) {
final Type resultElementType = TypeUtil.getTypeArgument(resultType);
final Type wireResponseElementType = constructWireResponseType(resultElementType, wireType);
return TypeUtil.createParameterizedType(((ParameterizedType) resultType).getRawType(),
wireResponseElementType);
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, Map.class)) {
final Type[] typeArguments = TypeUtil.getTypeArguments(resultType);
final Type resultValueType = typeArguments[1];
final Type wireResponseValueType = constructWireResponseType(resultValueType, wireType);
return TypeUtil.createParameterizedType(((ParameterizedType) resultType).getRawType(),
typeArguments[0], wireResponseValueType);
}
return resultType;
}
/**
* Converts the object {@code wireResponse} that was deserialized using the 'response body Type' (produced by
* {@link HttpResponseBodyDecoder
*
* @param wireResponse The object to convert.
* @param resultType The {@link Type} to convert the {@code wireResponse} to.
* @param wireType The {@link Type} of the {@code wireResponse}.
*
* @return The converted object.
*/
private static Object convertToResultType(final Object wireResponse, final Type resultType, final Type wireType) {
if (resultType == byte[].class) {
if (wireType == Base64Url.class) {
return (new Base64Url(wireResponse.toString())).decodedBytes();
}
} else if (resultType == OffsetDateTime.class) {
if (wireType == DateTimeRfc1123.class) {
return new DateTimeRfc1123(wireResponse.toString()).getDateTime();
} else {
return OffsetDateTime.parse(wireResponse.toString());
}
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, List.class)) {
final Type resultElementType = TypeUtil.getTypeArgument(resultType);
@SuppressWarnings("unchecked") final List<Object> wireResponseList = (List<Object>) wireResponse;
final int wireResponseListSize = wireResponseList.size();
for (int i = 0; i < wireResponseListSize; ++i) {
final Object wireResponseElement = wireResponseList.get(i);
final Object resultElement = convertToResultType(wireResponseElement, resultElementType, wireType);
if (wireResponseElement != resultElement) {
wireResponseList.set(i, resultElement);
}
}
return wireResponseList;
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, Map.class)) {
final Type resultValueType = TypeUtil.getTypeArguments(resultType)[1];
@SuppressWarnings("unchecked") final Map<String, Object> wireResponseMap
= (Map<String, Object>) wireResponse;
final Set<Map.Entry<String, Object>> wireResponseEntries = wireResponseMap.entrySet();
for (Map.Entry<String, Object> wireResponseEntry : wireResponseEntries) {
final Object wireResponseValue = wireResponseEntry.getValue();
final Object resultValue = convertToResultType(wireResponseValue, resultValueType, wireType);
if (wireResponseValue != resultValue) {
wireResponseMap.put(wireResponseEntry.getKey(), resultValue);
}
}
return wireResponseMap;
}
return wireResponse;
}
/**
* Get the {@link Type} entity returned by the REST API.
*
* @return The entity type.
*/
private static Type extractEntityTypeFromReturnType(HttpResponseDecodeData decodeData) {
Type token = decodeData.getReturnType();
if (TypeUtil.isTypeOrSubTypeOf(token, Response.class)) {
token = TypeUtil.getRestResponseBodyType(token);
}
return token;
}
/**
* Ensure that the request property and method are set in the {@link Response}.
*
* @param response The {@link Response} to validate.
*/
private static void ensureRequestSet(Response<?> response) {
Objects.requireNonNull(response.getRequest());
}
} | class HttpResponseBodyDecoder {
private static final ClientLogger LOGGER = new ClientLogger(HttpResponseBodyDecoder.class);
private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
/**
* Decodes the body of an {@link Response} into the type returned by the called API.
*
* <p>If the response body cannot be decoded, null will be returned.</p>
*
* @param body The response body retrieved from the {@link Response} to decode.
* @param response The {@link Response}.
* @param serializer The {@link ObjectSerializer} that performs the decoding.
* @param decodeData The API method metadata used during decoding of the {@link Response response}.
*
* @return The decoded {@link Response response} body, or {@code null} if the body could not be decoded.
*
* @throws HttpResponseException If the body cannot be decoded.
*/
public static Object decodeByteArray(BinaryData body, Response<?> response, ObjectSerializer serializer,
HttpResponseDecodeData decodeData) {
ensureRequestSet(response);
if (response.getRequest().getHttpMethod() == HttpMethod.HEAD) {
return null;
} else if (isErrorStatus(response.getStatusCode(), decodeData)) {
try {
return deserializeBody(body, decodeData.getUnexpectedException(
response.getStatusCode()).getExceptionBodyClass(), null, serializer);
} catch (IOException e) {
return LOGGER.atWarning().log("Failed to deserialize the error entity.", e);
} catch (RuntimeException e) {
Throwable cause = e.getCause();
if (cause instanceof InvocationTargetException || cause instanceof IllegalAccessException
|| cause instanceof NoSuchMethodException || cause instanceof IOException) {
LOGGER.atWarning().log("Failed to deserialize the error entity.", e);
return e;
} else {
throw e;
}
}
} else {
if (!decodeData.isReturnTypeDecodable()) {
return null;
}
try {
return deserializeBody(body == null ? response.getBody() : body,
extractEntityTypeFromReturnType(decodeData), decodeData.getReturnValueWireType(), serializer);
} catch (MalformedValueException e) {
throw new HttpResponseException("HTTP response has a malformed body.", response, null, e);
} catch (IOException e) {
throw new HttpResponseException("Deserialization failed.", response, null, e);
}
}
}
/**
* @return The decoded type used to decode the response body, null if the body is not decodable.
*/
public static Type decodedType(final Response<?> response, final HttpResponseDecodeData decodeData) {
ensureRequestSet(response);
if (response.getRequest().getHttpMethod() == HttpMethod.HEAD) {
return null;
} else if (isErrorStatus(response.getStatusCode(), decodeData)) {
return decodeData.getUnexpectedException(response.getStatusCode()).getExceptionBodyClass();
} else {
return decodeData.isReturnTypeDecodable() ? extractEntityTypeFromReturnType(decodeData) : null;
}
}
/**
* Checks the {@link Response} status code is considered as error.
*
* @param statusCode The status code from the response.
* @param decodeData Metadata about the API response.
*
* @return {@code true} if the {@link Response} status code is considered as error, {@code false}
* otherwise.
*/
static boolean isErrorStatus(int statusCode, HttpResponseDecodeData decodeData) {
return !decodeData.isExpectedResponseStatusCode(statusCode);
}
/**
* Deserialize the given string value representing content of a REST API response.
*
* @param value The string value to deserialize.
* @param resultType The return type of the Java proxy method.
* @param wireType Value of the optional {@link HttpRequestInformation
* the Java proxy method indicating 'entity type' (wireType) of REST API wire response body.
*
* @return Deserialized object.
* @throws IOException If the deserialization fails.
*/
private static Object deserializeBody(BinaryData value, Type resultType, Type wireType, ObjectSerializer serializer)
throws IOException {
if (wireType == null) {
return deserialize(value, resultType, serializer);
} else {
Type wireResponseType = constructWireResponseType(resultType, wireType);
Object wireResponse = deserialize(value, wireResponseType, serializer);
return convertToResultType(wireResponse, resultType, wireType);
}
}
/**
* Given: (1). The {@link Type result type} of the Java proxy method return value and (2). The
* {@link HttpRequestInformation
* the same REST APIs wire response body, this method will construct the 'response body Type'.
*
* <p>Note: When the {@link HttpRequestInformation
* the raw HTTP response content will need to be parsed using the derived 'response body Type' and then converted to
* the actual {@code returnType}.</p>
*
* @param resultType The {@link Type} of java proxy method return value.
* @param wireType The {@link Type} of entity in REST API response body.
*
* @return The {@link Type} of REST API response body.
*/
private static Type constructWireResponseType(Type resultType, Type wireType) {
Objects.requireNonNull(wireType);
if (resultType == byte[].class) {
if (wireType == Base64Url.class) {
return Base64Url.class;
}
} else if (resultType == OffsetDateTime.class) {
if (wireType == DateTimeRfc1123.class) {
return DateTimeRfc1123.class;
}
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, List.class)) {
final Type resultElementType = TypeUtil.getTypeArgument(resultType);
final Type wireResponseElementType = constructWireResponseType(resultElementType, wireType);
return TypeUtil.createParameterizedType(((ParameterizedType) resultType).getRawType(),
wireResponseElementType);
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, Map.class)) {
final Type[] typeArguments = TypeUtil.getTypeArguments(resultType);
final Type resultValueType = typeArguments[1];
final Type wireResponseValueType = constructWireResponseType(resultValueType, wireType);
return TypeUtil.createParameterizedType(((ParameterizedType) resultType).getRawType(),
typeArguments[0], wireResponseValueType);
}
return resultType;
}
/**
* Converts the object {@code wireResponse} that was deserialized using the 'response body Type' (produced by
* {@link HttpResponseBodyDecoder
*
* @param wireResponse The object to convert.
* @param resultType The {@link Type} to convert the {@code wireResponse} to.
* @param wireType The {@link Type} of the {@code wireResponse}.
*
* @return The converted object.
*/
private static Object convertToResultType(final Object wireResponse, final Type resultType, final Type wireType) {
if (resultType == byte[].class) {
if (wireType == Base64Url.class) {
return (new Base64Url(wireResponse.toString())).decodedBytes();
}
} else if (resultType == OffsetDateTime.class) {
if (wireType == DateTimeRfc1123.class) {
return new DateTimeRfc1123(wireResponse.toString()).getDateTime();
} else {
return OffsetDateTime.parse(wireResponse.toString());
}
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, List.class)) {
final Type resultElementType = TypeUtil.getTypeArgument(resultType);
@SuppressWarnings("unchecked") final List<Object> wireResponseList = (List<Object>) wireResponse;
final int wireResponseListSize = wireResponseList.size();
for (int i = 0; i < wireResponseListSize; ++i) {
final Object wireResponseElement = wireResponseList.get(i);
final Object resultElement = convertToResultType(wireResponseElement, resultElementType, wireType);
if (wireResponseElement != resultElement) {
wireResponseList.set(i, resultElement);
}
}
return wireResponseList;
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, Map.class)) {
final Type resultValueType = TypeUtil.getTypeArguments(resultType)[1];
@SuppressWarnings("unchecked") final Map<String, Object> wireResponseMap
= (Map<String, Object>) wireResponse;
final Set<Map.Entry<String, Object>> wireResponseEntries = wireResponseMap.entrySet();
for (Map.Entry<String, Object> wireResponseEntry : wireResponseEntries) {
final Object wireResponseValue = wireResponseEntry.getValue();
final Object resultValue = convertToResultType(wireResponseValue, resultValueType, wireType);
if (wireResponseValue != resultValue) {
wireResponseMap.put(wireResponseEntry.getKey(), resultValue);
}
}
return wireResponseMap;
}
return wireResponse;
}
/**
* Get the {@link Type} entity returned by the REST API.
*
* @return The entity type.
*/
private static Type extractEntityTypeFromReturnType(HttpResponseDecodeData decodeData) {
Type token = decodeData.getReturnType();
if (TypeUtil.isTypeOrSubTypeOf(token, Response.class)) {
token = TypeUtil.getRestResponseBodyType(token);
}
return token;
}
/**
* Ensure that the request property and method are set in the {@link Response}.
*
* @param response The {@link Response} to validate.
*/
private static void ensureRequestSet(Response<?> response) {
Objects.requireNonNull(response.getRequest());
}
} |
I added assertions to check the values of all of the properties in every class, but I'm wondering if that might be overkill—Would it be better to just assert the values of the long properties only? _(e.g. only readIOps, writeIOps, numSucceededTasks, numFailedTasks, numTaskRetries in this case)_ | public void testDeserializationOfBatchJobStatistics() throws IOException {
String jsonResponse = "{"
+ "\"url\":\"https:
+ "\"startTime\":\"2022-01-01T00:00:00Z\","
+ "\"lastUpdateTime\":\"2022-01-01T01:00:00Z\","
+ "\"userCPUTime\":\"PT1H\","
+ "\"kernelCPUTime\":\"PT30M\","
+ "\"wallClockTime\":\"PT1H30M\","
+ "\"readIOps\":\"1000\","
+ "\"writeIOps\":\"500\","
+ "\"readIOGiB\":0.5,"
+ "\"writeIOGiB\":0.25,"
+ "\"numSucceededTasks\":\"10\","
+ "\"numFailedTasks\":\"2\","
+ "\"numTaskRetries\":\"3\","
+ "\"waitTime\":\"PT10M\""
+ "}";
try (JsonReader jsonReader = JsonProviders.createReader(new StringReader(jsonResponse))) {
BatchJobStatistics stats = BatchJobStatistics.fromJson(jsonReader);
Assertions.assertNotNull(stats);
Assertions.assertEquals("https:
Assertions.assertEquals(OffsetDateTime.parse("2022-01-01T00:00:00Z"), stats.getStartTime());
Assertions.assertEquals(OffsetDateTime.parse("2022-01-01T01:00:00Z"), stats.getLastUpdateTime());
Assertions.assertEquals(Duration.parse("PT1H"), stats.getUserCpuTime());
Assertions.assertEquals(Duration.parse("PT30M"), stats.getKernelCpuTime());
Assertions.assertEquals(Duration.parse("PT1H30M"), stats.getWallClockTime());
Assertions.assertEquals(1000, stats.getReadIOps());
Assertions.assertEquals(500, stats.getWriteIOps());
Assertions.assertEquals(0.5, stats.getReadIOGiB());
Assertions.assertEquals(0.25, stats.getWriteIOGiB());
Assertions.assertEquals(10, stats.getNumSucceededTasks());
Assertions.assertEquals(2, stats.getNumFailedTasks());
Assertions.assertEquals(3, stats.getNumTaskRetries());
Assertions.assertEquals(Duration.parse("PT10M"), stats.getWaitTime());
} catch (IOException e) {
throw new RuntimeException(e);
}
} | Assertions.assertEquals(Duration.parse("PT30M"), stats.getKernelCpuTime()); | public void testDeserializationOfBatchJobStatistics() throws IOException {
String jsonResponse = "{"
+ "\"url\":\"https:
+ "\"startTime\":\"2022-01-01T00:00:00Z\","
+ "\"lastUpdateTime\":\"2022-01-01T01:00:00Z\","
+ "\"userCPUTime\":\"PT1H\","
+ "\"kernelCPUTime\":\"PT30M\","
+ "\"wallClockTime\":\"PT1H30M\","
+ "\"readIOps\":\"1000\","
+ "\"writeIOps\":\"500\","
+ "\"readIOGiB\":0.5,"
+ "\"writeIOGiB\":0.25,"
+ "\"numSucceededTasks\":\"10\","
+ "\"numFailedTasks\":\"2\","
+ "\"numTaskRetries\":\"3\","
+ "\"waitTime\":\"PT10M\""
+ "}";
try (JsonReader jsonReader = JsonProviders.createReader(new StringReader(jsonResponse))) {
BatchJobStatistics stats = BatchJobStatistics.fromJson(jsonReader);
Assertions.assertNotNull(stats);
Assertions.assertEquals("https:
Assertions.assertEquals(OffsetDateTime.parse("2022-01-01T00:00:00Z"), stats.getStartTime());
Assertions.assertEquals(OffsetDateTime.parse("2022-01-01T01:00:00Z"), stats.getLastUpdateTime());
Assertions.assertEquals(Duration.parse("PT1H"), stats.getUserCpuTime());
Assertions.assertEquals(Duration.parse("PT30M"), stats.getKernelCpuTime());
Assertions.assertEquals(Duration.parse("PT1H30M"), stats.getWallClockTime());
Assertions.assertEquals(1000, stats.getReadIOps());
Assertions.assertEquals(500, stats.getWriteIOps());
Assertions.assertEquals(0.5, stats.getReadIOGiB());
Assertions.assertEquals(0.25, stats.getWriteIOGiB());
Assertions.assertEquals(10, stats.getNumSucceededTasks());
Assertions.assertEquals(2, stats.getNumFailedTasks());
Assertions.assertEquals(3, stats.getNumTaskRetries());
Assertions.assertEquals(Duration.parse("PT10M"), stats.getWaitTime());
} catch (IOException e) {
throw new RuntimeException(e);
}
} | class JobTests extends BatchClientTestBase {
private static BatchPool livePool;
static String poolId;
@Override
protected void beforeTest() {
super.beforeTest();
// Shared pool id for every test in this class; user-name prefixed to avoid collisions
// between concurrent CI runs.
poolId = getStringIdWithUserNamePrefix("-testpool");
// A real pool is only provisioned when recording against the live service; in
// PLAYBACK mode the recorded responses stand in for it.
if (getTestMode() == TestMode.RECORD) {
if (livePool == null) {
try {
livePool = createIfNotExistIaaSPool(poolId);
} catch (Exception e) {
// NOTE(review): the exception is only printed; the assertion below is what
// actually fails the run when pool creation did not succeed.
e.printStackTrace();
}
Assertions.assertNotNull(livePool);
}
}
}
@Test
public void canCrudJob() throws Exception {
    String jobId = getStringIdWithUserNamePrefix("-Job-canCRUD");

    // CREATE: submit a job targeting the shared test pool.
    BatchPoolInfo poolInfo = new BatchPoolInfo();
    poolInfo.setPoolId(poolId);
    BatchJobCreateContent jobToCreate = new BatchJobCreateContent(jobId, poolInfo);
    batchClient.createJob(jobToCreate);
    try {
        // READ: fetch the job back and verify its service-assigned defaults.
        BatchJob job = batchClient.getJob(jobId);
        Assertions.assertNotNull(job);
        Assertions.assertNotNull(job.isAllowTaskPreemption());
        Assertions.assertEquals(-1, (int) job.getMaxParallelTasks());
        Assertions.assertEquals(jobId, job.getId());
        Assertions.assertEquals((Integer) 0, job.getPriority());

        // LIST: the new job must appear in the job listing.
        PagedIterable<BatchJob> jobs = batchClient.listJobs();
        Assertions.assertNotNull(jobs);
        boolean found = false;
        for (BatchJob batchJob : jobs) {
            if (batchJob.getId().equals(jobId)) {
                found = true;
                break;
            }
        }
        Assertions.assertTrue(found);

        // UPDATE: bump the priority and verify the change round-trips.
        BatchJob replacementJob = job;
        replacementJob.setPriority(1);
        batchClient.replaceJob(jobId, replacementJob);
        job = batchClient.getJob(jobId);
        Assertions.assertEquals((Integer) 1, job.getPriority());

        // DELETE: after deletion, getJob must fail with a 404.
        batchClient.deleteJob(jobId);
        try {
            batchClient.getJob(jobId);
            // BUGFIX: was Assertions.assertTrue(true, ...) which can never fail;
            // the test must fail when a deleted job is still retrievable.
            Assertions.fail("Shouldn't be here, the job should be deleted");
        } catch (Exception e) {
            if (!e.getMessage().contains("Status code 404")) {
                throw e;
            }
        }
        Thread.sleep(1 * 1000);
    } finally {
        // Best-effort cleanup: the job may already be gone.
        try {
            batchClient.deleteJob(jobId);
        } catch (Exception e) {
            // ignored: cleanup only
        }
    }
}
@Test
public void canUpdateJobState() throws Exception {
String jobId = getStringIdWithUserNamePrefix("-Job-CanUpdateState");
// Create a job on the shared test pool; new jobs start in the ACTIVE state.
BatchPoolInfo poolInfo = new BatchPoolInfo();
poolInfo.setPoolId(poolId);
BatchJobCreateContent jobToCreate = new BatchJobCreateContent(jobId, poolInfo);
batchClient.createJob(jobToCreate);
try {
BatchJob job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.ACTIVE, job.getState());
// Replace the job with a new priority and retry constraint, then verify round-trip.
Integer maxTaskRetryCount = 3;
Integer priority = 500;
job.setPriority(priority);
job.setConstraints(new BatchJobConstraints().setMaxTaskRetryCount(maxTaskRetryCount));
job.getPoolInfo().setPoolId(poolId);
batchClient.replaceJob(jobId, job);
job = batchClient.getJob(jobId);
Assertions.assertEquals(priority, job.getPriority());
Assertions.assertEquals(maxTaskRetryCount, job.getConstraints().getMaxTaskRetryCount());
// Disable the job (re-queueing running tasks); the state transitions through DISABLING.
batchClient.disableJob(jobId, new BatchJobDisableContent(DisableBatchJobOption.REQUEUE));
job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.DISABLING, job.getState());
// NOTE(review): fixed sleep makes this timing-sensitive; the follow-up assertion
// accepts either DISABLED or DISABLING to tolerate a slow transition.
Thread.sleep(5 * 1000);
job = batchClient.getJob(jobId);
Assertions.assertTrue(job.getState() == BatchJobState.DISABLED || job.getState() == BatchJobState.DISABLING);
Assertions.assertEquals(OnAllBatchTasksComplete.NO_ACTION, job.getOnAllTasksComplete());
// PATCH-style update: only onAllTasksComplete changes.
BatchJobUpdateContent jobUpdateContent = new BatchJobUpdateContent();
jobUpdateContent.setOnAllTasksComplete(OnAllBatchTasksComplete.TERMINATE_JOB);
batchClient.updateJob(jobId, jobUpdateContent);
job = batchClient.getJob(jobId);
Assertions.assertEquals(OnAllBatchTasksComplete.TERMINATE_JOB, job.getOnAllTasksComplete());
// Re-enable, then terminate with a reason; state goes TERMINATING -> COMPLETED.
batchClient.enableJob(jobId);
job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.ACTIVE, job.getState());
batchClient.terminateJob(jobId, new TerminateBatchJobOptions(), new BatchJobTerminateContent().setTerminationReason("myreason"));
job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.TERMINATING, job.getState());
// NOTE(review): another fixed sleep; assumes termination completes within 2s — TODO confirm.
Thread.sleep(2 * 1000);
job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.COMPLETED, job.getState());
} finally {
// Best-effort cleanup: the job may already be deleted or completed.
try {
batchClient.deleteJob(jobId);
} catch (Exception e) {
// ignored: cleanup only
}
}
}
@Test
public void canCRUDJobWithPoolNodeCommunicationMode() throws Exception {
    String jobId = getStringIdWithUserNamePrefix("-Job-canCRUDWithPoolNodeComm");
    BatchNodeCommunicationMode targetMode = BatchNodeCommunicationMode.SIMPLIFIED;

    // Build an auto-pool specification carrying an explicit target node communication mode.
    ImageReference imgRef = new ImageReference().setPublisher("Canonical").setOffer("UbuntuServer")
        .setSku("18.04-LTS").setVersion("latest");
    VirtualMachineConfiguration configuration = new VirtualMachineConfiguration(imgRef, "batch.node.ubuntu 18.04");
    BatchPoolSpecification poolSpec = new BatchPoolSpecification("STANDARD_D1_V2");
    poolSpec.setVirtualMachineConfiguration(configuration)
        .setTargetNodeCommunicationMode(targetMode);
    BatchPoolInfo poolInfo = new BatchPoolInfo();
    poolInfo.setAutoPoolSpecification(new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JOB).setPool(poolSpec));
    BatchJobCreateContent jobToCreate = new BatchJobCreateContent(jobId, poolInfo);
    batchClient.createJob(jobToCreate);
    try {
        // The communication mode set on the auto-pool spec must round-trip through the service.
        BatchJob job = batchClient.getJob(jobId);
        Assertions.assertNotNull(job);
        Assertions.assertEquals(jobId, job.getId());
        Assertions.assertEquals(targetMode,
            job.getPoolInfo().getAutoPoolSpecification().getPool().getTargetNodeCommunicationMode());

        // DELETE: after deletion, getJob must fail with a 404.
        batchClient.deleteJob(jobId);
        try {
            batchClient.getJob(jobId);
            // BUGFIX: was Assertions.assertTrue(true, ...) which can never fail;
            // the test must fail when a deleted job is still retrievable.
            Assertions.fail("Shouldn't be here, the job should be deleted");
        } catch (Exception err) {
            if (!err.getMessage().contains("Status code 404")) {
                throw err;
            }
        }
        threadSleepInRecordMode(15 * 1000);
    } finally {
        // Best-effort cleanup: the job may already be gone.
        try {
            batchClient.deleteJob(jobId);
        } catch (Exception e) {
            // ignored: cleanup only
        }
    }
}
@Test
} | class JobTests extends BatchClientTestBase {
private static BatchPool livePool;
static String poolId;
@Override
protected void beforeTest() {
super.beforeTest();
poolId = getStringIdWithUserNamePrefix("-testpool");
if (getTestMode() == TestMode.RECORD) {
if (livePool == null) {
try {
livePool = createIfNotExistIaaSPool(poolId);
} catch (Exception e) {
e.printStackTrace();
}
Assertions.assertNotNull(livePool);
}
}
}
@Test
public void canCrudJob() throws Exception {
    String jobId = getStringIdWithUserNamePrefix("-Job-canCRUD");

    // CREATE: submit a job targeting the shared test pool.
    BatchPoolInfo poolInfo = new BatchPoolInfo();
    poolInfo.setPoolId(poolId);
    BatchJobCreateContent jobToCreate = new BatchJobCreateContent(jobId, poolInfo);
    batchClient.createJob(jobToCreate);
    try {
        // READ: fetch the job back and verify its service-assigned defaults.
        BatchJob job = batchClient.getJob(jobId);
        Assertions.assertNotNull(job);
        Assertions.assertNotNull(job.isAllowTaskPreemption());
        Assertions.assertEquals(-1, (int) job.getMaxParallelTasks());
        Assertions.assertEquals(jobId, job.getId());
        Assertions.assertEquals((Integer) 0, job.getPriority());

        // LIST: the new job must appear in the job listing.
        PagedIterable<BatchJob> jobs = batchClient.listJobs();
        Assertions.assertNotNull(jobs);
        boolean found = false;
        for (BatchJob batchJob : jobs) {
            if (batchJob.getId().equals(jobId)) {
                found = true;
                break;
            }
        }
        Assertions.assertTrue(found);

        // UPDATE: bump the priority and verify the change round-trips.
        BatchJob replacementJob = job;
        replacementJob.setPriority(1);
        batchClient.replaceJob(jobId, replacementJob);
        job = batchClient.getJob(jobId);
        Assertions.assertEquals((Integer) 1, job.getPriority());

        // DELETE: after deletion, getJob must fail with a 404.
        batchClient.deleteJob(jobId);
        try {
            batchClient.getJob(jobId);
            // BUGFIX: was Assertions.assertTrue(true, ...) which can never fail;
            // the test must fail when a deleted job is still retrievable.
            Assertions.fail("Shouldn't be here, the job should be deleted");
        } catch (Exception e) {
            if (!e.getMessage().contains("Status code 404")) {
                throw e;
            }
        }
        Thread.sleep(1 * 1000);
    } finally {
        // Best-effort cleanup: the job may already be gone.
        try {
            batchClient.deleteJob(jobId);
        } catch (Exception e) {
            // ignored: cleanup only
        }
    }
}
@Test
public void canUpdateJobState() throws Exception {
String jobId = getStringIdWithUserNamePrefix("-Job-CanUpdateState");
BatchPoolInfo poolInfo = new BatchPoolInfo();
poolInfo.setPoolId(poolId);
BatchJobCreateContent jobToCreate = new BatchJobCreateContent(jobId, poolInfo);
batchClient.createJob(jobToCreate);
try {
BatchJob job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.ACTIVE, job.getState());
Integer maxTaskRetryCount = 3;
Integer priority = 500;
job.setPriority(priority);
job.setConstraints(new BatchJobConstraints().setMaxTaskRetryCount(maxTaskRetryCount));
job.getPoolInfo().setPoolId(poolId);
batchClient.replaceJob(jobId, job);
job = batchClient.getJob(jobId);
Assertions.assertEquals(priority, job.getPriority());
Assertions.assertEquals(maxTaskRetryCount, job.getConstraints().getMaxTaskRetryCount());
batchClient.disableJob(jobId, new BatchJobDisableContent(DisableBatchJobOption.REQUEUE));
job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.DISABLING, job.getState());
Thread.sleep(5 * 1000);
job = batchClient.getJob(jobId);
Assertions.assertTrue(job.getState() == BatchJobState.DISABLED || job.getState() == BatchJobState.DISABLING);
Assertions.assertEquals(OnAllBatchTasksComplete.NO_ACTION, job.getOnAllTasksComplete());
BatchJobUpdateContent jobUpdateContent = new BatchJobUpdateContent();
jobUpdateContent.setOnAllTasksComplete(OnAllBatchTasksComplete.TERMINATE_JOB);
batchClient.updateJob(jobId, jobUpdateContent);
job = batchClient.getJob(jobId);
Assertions.assertEquals(OnAllBatchTasksComplete.TERMINATE_JOB, job.getOnAllTasksComplete());
batchClient.enableJob(jobId);
job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.ACTIVE, job.getState());
batchClient.terminateJob(jobId, new TerminateBatchJobOptions(), new BatchJobTerminateContent().setTerminationReason("myreason"));
job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.TERMINATING, job.getState());
Thread.sleep(2 * 1000);
job = batchClient.getJob(jobId);
Assertions.assertEquals(BatchJobState.COMPLETED, job.getState());
} finally {
try {
batchClient.deleteJob(jobId);
} catch (Exception e) {
}
}
}
@Test
public void canCRUDJobWithPoolNodeCommunicationMode() throws Exception {
    String jobId = getStringIdWithUserNamePrefix("-Job-canCRUDWithPoolNodeComm");
    BatchNodeCommunicationMode targetMode = BatchNodeCommunicationMode.SIMPLIFIED;

    // Build an auto-pool specification carrying an explicit target node communication mode.
    ImageReference imgRef = new ImageReference().setPublisher("Canonical").setOffer("UbuntuServer")
        .setSku("18.04-LTS").setVersion("latest");
    VirtualMachineConfiguration configuration = new VirtualMachineConfiguration(imgRef, "batch.node.ubuntu 18.04");
    BatchPoolSpecification poolSpec = new BatchPoolSpecification("STANDARD_D1_V2");
    poolSpec.setVirtualMachineConfiguration(configuration)
        .setTargetNodeCommunicationMode(targetMode);
    BatchPoolInfo poolInfo = new BatchPoolInfo();
    poolInfo.setAutoPoolSpecification(new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JOB).setPool(poolSpec));
    BatchJobCreateContent jobToCreate = new BatchJobCreateContent(jobId, poolInfo);
    batchClient.createJob(jobToCreate);
    try {
        // The communication mode set on the auto-pool spec must round-trip through the service.
        BatchJob job = batchClient.getJob(jobId);
        Assertions.assertNotNull(job);
        Assertions.assertEquals(jobId, job.getId());
        Assertions.assertEquals(targetMode,
            job.getPoolInfo().getAutoPoolSpecification().getPool().getTargetNodeCommunicationMode());

        // DELETE: after deletion, getJob must fail with a 404.
        batchClient.deleteJob(jobId);
        try {
            batchClient.getJob(jobId);
            // BUGFIX: was Assertions.assertTrue(true, ...) which can never fail;
            // the test must fail when a deleted job is still retrievable.
            Assertions.fail("Shouldn't be here, the job should be deleted");
        } catch (Exception err) {
            if (!err.getMessage().contains("Status code 404")) {
                throw err;
            }
        }
        threadSleepInRecordMode(15 * 1000);
    } finally {
        // Best-effort cleanup: the job may already be gone.
        try {
            batchClient.deleteJob(jobId);
        } catch (Exception e) {
            // ignored: cleanup only
        }
    }
}
@Test
} |
oops, warning | private void closeClient() {
try {
    this.eventHubAsyncClient.close();
} catch (Exception ex) {
    // BUGFIX: a failed close is abnormal and was logged at INFO; log at WARNING so it
    // is visible at default log levels (matches the logging used elsewhere for failures).
    LOGGER.atWarning()
        .addKeyValue(OWNER_ID_KEY, ownerId)
        .log("Failed to close the client", ex);
}
} | LOGGER.atInfo() | private void closeClient() {
try {
this.eventHubAsyncClient.close();
} catch (Exception ex) {
LOGGER.atWarning()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Failed to close the client", ex);
}
} | class PartitionBasedLoadBalancer {
private static final ClientLogger LOGGER = new ClientLogger(PartitionBasedLoadBalancer.class);
private final String eventHubName;
private final String consumerGroupName;
private final CheckpointStore checkpointStore;
private final EventHubAsyncClient eventHubAsyncClient;
private final String ownerId;
private final long inactiveTimeLimitInMillis;
private final PartitionPumpManager partitionPumpManager;
private final String fullyQualifiedNamespace;
private final Consumer<ErrorContext> processError;
private final PartitionContext partitionAgnosticContext;
private final AtomicBoolean isLoadBalancerRunning = new AtomicBoolean();
private final LoadBalancingStrategy loadBalancingStrategy;
private final AtomicBoolean morePartitionsToClaim = new AtomicBoolean();
private final AtomicReference<List<String>> partitionsCache = new AtomicReference<>(new ArrayList<>());
/**
* Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
* @param checkpointStore The partition manager that this load balancer will use to read/update ownership details.
* @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
* @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with.
* @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with.
* @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer.
* @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
* assuming the owner of the partition is inactive.
* @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
* that this {@link EventProcessorClient} is processing.
* @param processError The callback that will be called when an error occurs while running the load balancer.
* @param loadBalancingStrategy The load balancing strategy to use.
*/
PartitionBasedLoadBalancer(final CheckpointStore checkpointStore,
final EventHubAsyncClient eventHubAsyncClient, final String fullyQualifiedNamespace,
final String eventHubName, final String consumerGroupName, final String ownerId,
final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager,
final Consumer<ErrorContext> processError, LoadBalancingStrategy loadBalancingStrategy) {
this.checkpointStore = checkpointStore;
this.eventHubAsyncClient = eventHubAsyncClient;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.eventHubName = eventHubName;
this.consumerGroupName = consumerGroupName;
this.ownerId = ownerId;
// The inactivity limit is supplied in seconds but compared against millisecond
// timestamps elsewhere, so it is converted once here.
this.inactiveTimeLimitInMillis = TimeUnit.SECONDS
.toMillis(inactiveTimeLimitInSeconds);
this.partitionPumpManager = partitionPumpManager;
this.processError = processError;
// Partition-agnostic context ("NONE") used when reporting errors that are not tied
// to a specific partition.
this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName,
consumerGroupName, "NONE");
this.loadBalancingStrategy = loadBalancingStrategy;
}
/**
* This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
* EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient}
* owning <b>at most one</b> new partition.
* <p>
* The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
* EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
* this algorithm converges gradually towards a steady state.
* </p>
* When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
* {@link EventHubConsumerAsyncClient} for processing events from that partition.
*/
// Entry point invoked periodically by the EventProcessorClient. Zips the ownership
// records from the checkpoint store with the Event Hub's partition ids and feeds both
// into loadBalance(Tuple2). In GREEDY mode the cycle repeats while more partitions
// remain claimable.
void loadBalance() {
// compareAndSet guarantees only one balancing cycle runs at a time.
if (!isLoadBalancerRunning.compareAndSet(false, true)) {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Load balancer already running.")
;
return;
}
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Starting load balancer.");
/*
* Retrieve current partition ownership details from the datastore.
*/
final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore
.listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName)
.timeout(Duration.ofMinutes(1))
.collectMap(PartitionOwnership::getPartitionId, Function.identity());
/*
* Retrieve the list of partition ids from the Event Hub.
*/
Mono<List<String>> partitionsMono;
if (CoreUtils.isNullOrEmpty(partitionsCache.get())) {
// First run (or cache cleared): query the service for partition ids.
LOGGER.atInfo()
.addKeyValue(ENTITY_PATH_KEY, eventHubName)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Getting partitions from Event Hubs service.");
partitionsMono = eventHubAsyncClient
.getPartitionIds()
.timeout(Duration.ofMinutes(1))
.collectList();
} else {
// Cache populated by a previous cycle; the client is no longer needed for queries.
partitionsMono = Mono.just(partitionsCache.get());
closeClient();
}
Mono.zip(partitionOwnershipMono, partitionsMono)
.flatMap(this::loadBalance)
.then()
.repeat(() -> LoadBalancingStrategy.GREEDY == loadBalancingStrategy && morePartitionsToClaim.get())
.subscribe(ignored -> { },
ex -> {
// Surface the failure to the user callback and reset the running/claim flags.
LOGGER.atWarning()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log(Messages.LOAD_BALANCING_FAILED, ex);
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
isLoadBalancerRunning.set(false);
morePartitionsToClaim.set(false);
},
() -> LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Load balancing completed successfully."));
}
/*
* This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
* current Event Processor should take on the responsibility of processing more partitions.
*/
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
return Mono.fromRunnable(() -> {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Starting next iteration of load balancer.");
Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
List<String> partitionIds = tuple.getT2();
if (CoreUtils.isNullOrEmpty(partitionIds)) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(
new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
}
partitionsCache.set(partitionIds);
int numberOfPartitions = partitionIds.size();
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("numberOfPartitions", numberOfPartitions)
.addKeyValue("ownershipRecords", partitionOwnershipMap.size())
.log("Load balancing.");
if (!isValid(partitionOwnershipMap)) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(
new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
}
/*
* Remove all partitions' ownership that have not been modified for a configuration period of time. This
* means that the previous EventProcessor that owned the partition is probably down and the partition is now
* eligible to be claimed by other EventProcessors.
*/
Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
partitionOwnershipMap);
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("activeRecords", activePartitionOwnershipMap.size())
.log("Found active ownership records.");
/*
* Create a map of owner id and a list of partitions it owns
*/
Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
.stream()
.collect(
Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));
ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
logPartitionDistribution(ownerPartitionMap);
if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
/*
* If the active partition ownership map is empty, this is the first time an event processor is
* running or all Event Processors are down for this Event Hub, consumer group combination. All
* partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
*/
claimOwnership(partitionOwnershipMap, partitionIds.get(ThreadLocalRandom.current()
.nextInt(numberOfPartitions)));
return;
}
/*
* Find the minimum number of partitions every event processor should own when the load is
* evenly distributed.
*/
int numberOfActiveEventProcessors = ownerPartitionMap.size();
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("numberOfProcessors", ownerPartitionMap.size())
.log("Found active event processors.");
int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
/*
* If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
* a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
* Calculate the number of event processors that can own additional partition.
*/
int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("minPartitionsPerEventProcessor", minPartitionsPerEventProcessor)
.addKeyValue("eventProcessorsWithAdditionalPartition",
numberOfEventProcessorsWithAdditionalPartition)
.log("Calculated number of event processors that can own additional partition.");
if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
ownerPartitionMap)) {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
.log("Load is balanced for this event processor.");
renewOwnership(partitionOwnershipMap);
return;
}
if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
.log("This event processor shouldn't own more partitions");
renewOwnership(partitionOwnershipMap);
return;
}
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
.log("Load is unbalanced and this event processor should own more partitions");
/*
* If some partitions are unclaimed, this could be because an event processor is down and
* it's partitions are now available for others to own or because event processors are just
* starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
* Find any partition that is not actively owned and claim it.
*
* OR
*
* Find a partition to steal from another event processor. Pick the event processor that has owns the
* highest number of partitions.
*/
String partitionToClaim = partitionIds.parallelStream()
.filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
.findAny()
.orElseGet(() -> {
LOGGER.atInfo()
.addKeyValue("partitionCount", 0)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("No unclaimed partitions, stealing from another event processor");
return findPartitionToSteal(ownerPartitionMap);
});
claimOwnership(partitionOwnershipMap, partitionToClaim);
});
}
/*
* Closes the client used by load balancer to get the partitions.
*/
/*
* This method renews the ownership of currently owned partitions
*/
// Renews ownership of the partitions this processor already pumps and still owns per the
// checkpoint store, then clears the running flag so the next balancing cycle can start.
private void renewOwnership(Map<String, PartitionOwnership> partitionOwnershipMap) {
morePartitionsToClaim.set(false);
// Re-claim only partitions that (a) have an active pump here and (b) the store still
// attributes to this owner.
checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
.stream()
.filter(
partitionId -> partitionOwnershipMap.containsKey(partitionId) && partitionOwnershipMap.get(partitionId)
.getOwnerId().equals(this.ownerId))
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()))
.subscribe(partitionPumpManager::verifyPartitionConnection,
ex -> {
LOGGER.atError()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Error renewing partition ownership", ex);
// Reset the flag on error as well so load balancing is not wedged.
isLoadBalancerRunning.set(false);
},
() -> isLoadBalancerRunning.set(false));
}
/*
* Check if partition ownership data is valid before proceeding with load balancing.
*/
/*
 * Validates the ownership records fetched from the checkpoint store before load balancing.
 * Every record must target this Event Hub and consumer group and carry a partition id,
 * last-modified time, and ETag.
 */
private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
    return partitionOwnershipMap.values()
        .stream()
        .allMatch(ownership -> ownership.getEventHubName() != null
            && ownership.getEventHubName().equals(this.eventHubName)
            && ownership.getConsumerGroup() != null
            && ownership.getConsumerGroup().equals(this.consumerGroupName)
            && ownership.getPartitionId() != null
            && ownership.getLastModifiedTime() != null
            && ownership.getETag() != null);
}
/*
* Find the event processor that owns the maximum number of partitions and steal a random partition
* from it.
*/
/*
 * Picks a partition to steal: locates the event processor owning the largest number of
 * partitions and returns the id of one of its partitions chosen at random.
 */
private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    final Map.Entry<String, List<PartitionOwnership>> busiestOwner = ownerPartitionMap.entrySet()
        .stream()
        .max(Comparator.comparingInt(entry -> entry.getValue().size()))
        .get();
    final List<PartitionOwnership> ownedPartitions = busiestOwner.getValue();

    LOGGER.atInfo()
        .addKeyValue(OWNER_ID_KEY, ownerId)
        .addKeyValue("ownerWithMaxPartitions", busiestOwner.getKey())
        .log("Stealing a partition from owner that owns max number of partitions.");

    final int victimIndex = ThreadLocalRandom.current().nextInt(ownedPartitions.size());
    return ownedPartitions.get(victimIndex).getPartitionId();
}
/*
* When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
* and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
* partition.
*/
/*
 * The load is balanced when every active event processor owns either the minimum number
 * of partitions or exactly one more, and precisely
 * numberOfEventProcessorsWithAdditionalPartition processors own the extra one.
 */
private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
    final int numberOfEventProcessorsWithAdditionalPartition,
    final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    // Any processor owning fewer than min or more than min + 1 means the load is skewed.
    final boolean anyOutOfRange = ownerPartitionMap.values()
        .stream()
        .mapToInt(List::size)
        .anyMatch(owned -> owned < minPartitionsPerEventProcessor
            || owned > minPartitionsPerEventProcessor + 1);
    if (anyOutOfRange) {
        return false;
    }
    final long processorsWithExtraPartition = ownerPartitionMap.values()
        .stream()
        .mapToInt(List::size)
        .filter(owned -> owned == minPartitionsPerEventProcessor + 1)
        .count();
    return processorsWithExtraPartition == numberOfEventProcessorsWithAdditionalPartition;
}
/*
* This method is called after determining that the load is not balanced. This method will evaluate
* if the current event processor should own more partitions. Specifically, this method returns true if the
* current event processor owns less than the minimum number of partitions or if it owns the minimum number
* and no other event processor owns lesser number of partitions than this event processor.
*/
/*
 * Called after determining that the load is not balanced. Returns true when this event
 * processor owns fewer than the minimum number of partitions, or when it owns the
 * minimum and no other processor owns fewer than it does.
 */
private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
    final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    final int ownedByThisProcessor = ownerPartitionMap.get(this.ownerId).size();
    final int smallestLoad = ownerPartitionMap.values()
        .stream()
        .mapToInt(List::size)
        .min()
        .getAsInt();
    return ownedByThisProcessor < minPartitionsPerEventProcessor
        || ownedByThisProcessor == smallestLoad;
}
/*
* This method will create a new map of partition id and PartitionOwnership containing only those partitions
* that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
* modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
* dead event processors. These will not be included in the map returned by this method.
*/
/*
 * Filters the ownership map down to entries that are still actively owned: records whose
 * lastModifiedTime is within the inactivity limit and that have a non-empty owner id.
 * Older entries are assumed to belong to dead event processors and are dropped, making
 * their partitions claimable by surviving processors.
 */
private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
    final Map<String, PartitionOwnership> partitionOwnershipMap) {
    // Snapshot the clock once so the logged level and the filter agree (previously two
    // separate currentTimeMillis() reads could straddle the limit).
    final long now = System.currentTimeMillis();
    return partitionOwnershipMap
        .entrySet()
        .stream()
        .filter(entry -> {
            long elapsedMillis = now - entry.getValue().getLastModifiedTime();
            // BUGFIX: the log-level condition previously compared elapsed SECONDS
            // (diff / 1000) against the MILLISECOND limit, so genuinely inactive
            // ownerships were still logged at VERBOSE instead of INFORMATIONAL.
            LOGGER.atLevel((elapsedMillis < inactiveTimeLimitInMillis) ? LogLevel.VERBOSE : LogLevel.INFORMATIONAL)
                .addKeyValue(PARTITION_ID_KEY, entry.getKey())
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .addKeyValue("partitionOwnerId", entry.getValue().getOwnerId())
                .addKeyValue("modifiedSecondsAgo", elapsedMillis / 1000)
                .log("Detecting inactive ownerships.");
            return elapsedMillis < inactiveTimeLimitInMillis
                && !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId());
        }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
}
private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) {
LOGGER.atInfo()
.addKeyValue(PARTITION_ID_KEY, partitionIdToClaim)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Attempting to claim ownership of partition.");
PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
partitionIdToClaim);
List<PartitionOwnership> partitionsToClaim = new ArrayList<>();
partitionsToClaim.add(ownershipRequest);
partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps()
.keySet()
.stream()
.filter(
partitionId -> partitionOwnershipMap.containsKey(partitionId) && partitionOwnershipMap.get(partitionId)
.getOwnerId().equals(this.ownerId))
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()));
morePartitionsToClaim.set(true);
checkpointStore
.claimOwnership(partitionsToClaim)
.doOnNext(partitionOwnership -> LOGGER.atInfo()
.addKeyValue(PARTITION_ID_KEY, partitionOwnership.getPartitionId())
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Successfully claimed ownership."))
.doOnError(ex -> LOGGER
.atWarning()
.addKeyValue(PARTITION_ID_KEY, ownershipRequest.getPartitionId())
.log(Messages.FAILED_TO_CLAIM_OWNERSHIP, ex))
.collectList()
.zipWhen(ownershipList -> checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName,
consumerGroupName)
.collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity()))
.subscribe(ownedPartitionCheckpointsTuple -> {
ownedPartitionCheckpointsTuple.getT1()
.stream()
.forEach(po -> partitionPumpManager.startPartitionPump(po,
ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId())));
},
ex -> {
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
if (loadBalancingStrategy == LoadBalancingStrategy.BALANCED) {
isLoadBalancerRunning.set(false);
}
throw LOGGER.atError()
.addKeyValue(PARTITION_ID_KEY, partitionIdToClaim)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log(new IllegalStateException("Error while claiming ownership", ex));
},
() -> {
if (loadBalancingStrategy == LoadBalancingStrategy.BALANCED) {
isLoadBalancerRunning.set(false);
}
});
}
/*
 * Logs which partitions each owner currently holds. Emitted at VERBOSE only; the guard
 * avoids building the per-owner partition strings when VERBOSE logging is disabled.
 */
private void logPartitionDistribution(Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    if (!LOGGER.canLogAtLevel(LogLevel.VERBOSE)) {
        return;
    }
    LoggingEventBuilder distribution = LOGGER.atVerbose()
        .addKeyValue(OWNER_ID_KEY, ownerId);
    for (Entry<String, List<PartitionOwnership>> owner : ownerPartitionMap.entrySet()) {
        String partitionIds = owner.getValue().stream()
            .map(po -> po.getPartitionId())
            .collect(Collectors.joining(","));
        distribution.addKeyValue(owner.getKey(), partitionIds);
    }
    distribution.log("Current partition distribution.");
}
/*
 * Builds a PartitionOwnership claim request for the given partition, carrying over the
 * ETag of any existing record so the checkpoint store can perform a conditional update.
 */
private PartitionOwnership createPartitionOwnershipRequest(
    final Map<String, PartitionOwnership> partitionOwnershipMap,
    final String partitionIdToClaim) {
    final PartitionOwnership existing = partitionOwnershipMap.get(partitionIdToClaim);
    final String previousETag = existing == null ? null : existing.getETag();
    return new PartitionOwnership()
        .setFullyQualifiedNamespace(this.fullyQualifiedNamespace)
        .setOwnerId(this.ownerId)
        .setPartitionId(partitionIdToClaim)
        .setConsumerGroup(this.consumerGroupName)
        .setEventHubName(this.eventHubName)
        .setETag(previousETag);
}
} | class PartitionBasedLoadBalancer {
private static final ClientLogger LOGGER = new ClientLogger(PartitionBasedLoadBalancer.class);
// Identity of the entity this balancer works against.
private final String eventHubName;
private final String consumerGroupName;
// Store used to read and (conditionally) update ownership records.
private final CheckpointStore checkpointStore;
// Client used to fetch the partition ids; released once partitionsCache is populated.
private final EventHubAsyncClient eventHubAsyncClient;
// Unique id of the EventProcessorClient that owns this balancer.
private final String ownerId;
// Ownership records older than this (millis) are treated as abandoned.
private final long inactiveTimeLimitInMillis;
private final PartitionPumpManager partitionPumpManager;
private final String fullyQualifiedNamespace;
// Callback invoked when a balancing cycle fails.
private final Consumer<ErrorContext> processError;
// Context used for errors not tied to a specific partition (partition id "NONE").
private final PartitionContext partitionAgnosticContext;
// Guards against overlapping balancing cycles.
private final AtomicBoolean isLoadBalancerRunning = new AtomicBoolean();
private final LoadBalancingStrategy loadBalancingStrategy;
// In GREEDY mode, signals that the cycle should repeat to claim more partitions.
private final AtomicBoolean morePartitionsToClaim = new AtomicBoolean();
// Cached partition ids; avoids a service round trip on every cycle.
private final AtomicReference<List<String>> partitionsCache = new AtomicReference<>(new ArrayList<>());
/**
* Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
* @param checkpointStore The partition manager that this load balancer will use to read/update ownership details.
* @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
* @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with.
* @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with.
* @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer.
* @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
* assuming the owner of the partition is inactive.
* @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
* that this {@link EventProcessorClient} is processing.
* @param processError The callback that will be called when an error occurs while running the load balancer.
* @param loadBalancingStrategy The load balancing strategy to use.
*/
PartitionBasedLoadBalancer(final CheckpointStore checkpointStore,
    final EventHubAsyncClient eventHubAsyncClient, final String fullyQualifiedNamespace,
    final String eventHubName, final String consumerGroupName, final String ownerId,
    final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager,
    final Consumer<ErrorContext> processError, LoadBalancingStrategy loadBalancingStrategy) {
    // Identity of the entity this balancer works against.
    this.fullyQualifiedNamespace = fullyQualifiedNamespace;
    this.eventHubName = eventHubName;
    this.consumerGroupName = consumerGroupName;
    this.ownerId = ownerId;
    // Collaborators.
    this.checkpointStore = checkpointStore;
    this.eventHubAsyncClient = eventHubAsyncClient;
    this.partitionPumpManager = partitionPumpManager;
    this.processError = processError;
    this.loadBalancingStrategy = loadBalancingStrategy;
    // Normalize the inactivity limit to milliseconds once, up front.
    this.inactiveTimeLimitInMillis = TimeUnit.SECONDS.toMillis(inactiveTimeLimitInSeconds);
    // Context used when reporting errors that are not tied to a single partition.
    this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName,
        consumerGroupName, "NONE");
}
/**
* This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
* EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient}
* owning <b>at most one</b> new partition.
* <p>
* The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
* EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
* this algorithm converges gradually towards a steady state.
* </p>
* When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
* {@link EventHubConsumerAsyncClient} for processing events from that partition.
*/
/**
 * Runs a single load-balancing cycle: reads all ownership records from the
 * {@link CheckpointStore}, fetches (or reuses cached) partition ids, then executes
 * the balancing step. With the GREEDY strategy the step repeats while more
 * partitions remain claimable; otherwise at most one partition is claimed per call.
 */
void loadBalance() {
    // Re-entrancy guard: only one balancing cycle may be in flight at a time.
    if (!isLoadBalancerRunning.compareAndSet(false, true)) {
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .log("Load balancer already running.");
        return;
    }
    LOGGER.atInfo()
        .addKeyValue(OWNER_ID_KEY, ownerId)
        .log("Starting load balancer.");
    /*
     * Retrieve current partition ownership details from the datastore.
     */
    final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore
        .listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName)
        .timeout(Duration.ofMinutes(1))
        .collectMap(PartitionOwnership::getPartitionId, Function.identity());
    /*
     * Retrieve the list of partition ids from the Event Hub.
     */
    Mono<List<String>> partitionsMono;
    if (CoreUtils.isNullOrEmpty(partitionsCache.get())) {
        // First cycle (or empty cache): ask the service for the partition ids.
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, eventHubName)
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .log("Getting partitions from Event Hubs service.");
        partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofMinutes(1))
            .collectList();
    } else {
        // Serve partition ids from the cache and release the client that was only
        // needed to fetch them. NOTE(review): partitions added to the hub later
        // will not be observed from this cache — confirm that is intended.
        partitionsMono = Mono.just(partitionsCache.get());
        closeClient();
    }
    Mono.zip(partitionOwnershipMono, partitionsMono)
        .flatMap(this::loadBalance)
        .then()
        // GREEDY keeps looping while the previous step signalled more work.
        .repeat(() -> LoadBalancingStrategy.GREEDY == loadBalancingStrategy && morePartitionsToClaim.get())
        .subscribe(ignored -> { },
            ex -> {
                LOGGER.atWarning()
                    .addKeyValue(OWNER_ID_KEY, ownerId)
                    .log(Messages.LOAD_BALANCING_FAILED, ex);
                ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
                processError.accept(errorContext);
                // Reset the guards so the next scheduled cycle can run.
                isLoadBalancerRunning.set(false);
                morePartitionsToClaim.set(false);
            },
            () -> LOGGER.atInfo()
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .log("Load balancing completed successfully."));
}
/*
* This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
* current Event Processor should take on the responsibility of processing more partitions.
*/
/**
 * Executes one balancing step over the given (ownership records, partition ids)
 * snapshot: validates the records, drops inactive owners, and then either renews
 * current ownership (when balanced, or when this processor should not grow) or
 * claims one more partition — an unclaimed one if available, otherwise one stolen
 * from the most-loaded processor.
 *
 * @param tuple T1 = ownership records keyed by partition id; T2 = all partition ids.
 * @return a {@link Mono} that completes when the step has been executed.
 */
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    return Mono.fromRunnable(() -> {
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .log("Starting next iteration of load balancer.");
        Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
        List<String> partitionIds = tuple.getT2();
        if (CoreUtils.isNullOrEmpty(partitionIds)) {
            throw LOGGER.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
        }
        // Refresh the cache so subsequent cycles can skip the service call.
        partitionsCache.set(partitionIds);
        int numberOfPartitions = partitionIds.size();
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("numberOfPartitions", numberOfPartitions)
            .addKeyValue("ownershipRecords", partitionOwnershipMap.size())
            .log("Load balancing.");
        if (!isValid(partitionOwnershipMap)) {
            throw LOGGER.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
        }
        /*
         * Remove all partitions' ownership that have not been modified for a configuration period of time. This
         * means that the previous EventProcessor that owned the partition is probably down and the partition is now
         * eligible to be claimed by other EventProcessors.
         */
        Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
            partitionOwnershipMap);
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("activeRecords", activePartitionOwnershipMap.size())
            .log("Found active ownership records.");
        /*
         * Create a map of owner id and a list of partitions it owns
         */
        Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
            .stream()
            .collect(
                Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));
        // Ensure this processor appears in the map even when it owns nothing yet.
        ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
        logPartitionDistribution(ownerPartitionMap);
        if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
            /*
             * If the active partition ownership map is empty, this is the first time an event processor is
             * running or all Event Processors are down for this Event Hub, consumer group combination. All
             * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
             */
            claimOwnership(partitionOwnershipMap, partitionIds.get(ThreadLocalRandom.current()
                .nextInt(numberOfPartitions)));
            return;
        }
        /*
         * Find the minimum number of partitions every event processor should own when the load is
         * evenly distributed.
         */
        int numberOfActiveEventProcessors = ownerPartitionMap.size();
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("numberOfProcessors", ownerPartitionMap.size())
            .log("Found active event processors.");
        int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
        /*
         * If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
         * a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
         * Calculate the number of event processors that can own additional partition.
         */
        int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("minPartitionsPerEventProcessor", minPartitionsPerEventProcessor)
            .addKeyValue("eventProcessorsWithAdditionalPartition",
                numberOfEventProcessorsWithAdditionalPartition)
            .log("Calculated number of event processors that can own additional partition.");
        if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
            ownerPartitionMap)) {
            LOGGER.atInfo()
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
                .log("Load is balanced for this event processor.");
            renewOwnership(partitionOwnershipMap);
            return;
        }
        if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
            LOGGER.atInfo()
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
                .log("This event processor shouldn't own more partitions");
            renewOwnership(partitionOwnershipMap);
            return;
        }
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
            .log("Load is unbalanced and this event processor should own more partitions");
        /*
         * If some partitions are unclaimed, this could be because an event processor is down and
         * it's partitions are now available for others to own or because event processors are just
         * starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
         * Find any partition that is not actively owned and claim it.
         *
         * OR
         *
         * Find a partition to steal from another event processor. Pick the event processor that has owns the
         * highest number of partitions.
         */
        String partitionToClaim = partitionIds.parallelStream()
            .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
            .findAny()
            .orElseGet(() -> {
                LOGGER.atInfo()
                    .addKeyValue("partitionCount", 0)
                    .addKeyValue(OWNER_ID_KEY, ownerId)
                    .log("No unclaimed partitions, stealing from another event processor");
                return findPartitionToSteal(ownerPartitionMap);
            });
        claimOwnership(partitionOwnershipMap, partitionToClaim);
    });
}
/*
 * NOTE(review): stale comment — the closeClient() method it described is not visible
 * in this chunk, yet loadBalance() still calls closeClient(). Confirm the method
 * exists elsewhere in this class or restore it.
 */
/*
* This method renews the ownership of currently owned partitions
*/
/**
 * Renews ownership of every partition that is both pumped locally and still recorded
 * as owned by this processor in the given snapshot, then clears the running flag so
 * the next balancing cycle may start.
 *
 * @param partitionOwnershipMap latest ownership records keyed by partition id.
 */
private void renewOwnership(Map<String, PartitionOwnership> partitionOwnershipMap) {
    morePartitionsToClaim.set(false);
    checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
        .stream()
        // Renew only partitions we pump AND whose record still names us as owner.
        .filter(
            partitionId -> partitionOwnershipMap.containsKey(partitionId) && partitionOwnershipMap.get(partitionId)
                .getOwnerId().equals(this.ownerId))
        .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
        .collect(Collectors.toList()))
        .subscribe(partitionPumpManager::verifyPartitionConnection,
            ex -> {
                LOGGER.atError()
                    .addKeyValue(OWNER_ID_KEY, ownerId)
                    .log("Error renewing partition ownership", ex);
                // Allow the next cycle to run even after a renewal failure.
                isLoadBalancerRunning.set(false);
            },
            () -> isLoadBalancerRunning.set(false));
}
/*
* Check if partition ownership data is valid before proceeding with load balancing.
*/
/*
 * Sanity-checks the ownership records before balancing: every record must target
 * this Event Hub and consumer group and carry a partition id, last-modified time
 * and ETag. Returns false as soon as any record is malformed.
 */
private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
    for (PartitionOwnership ownership : partitionOwnershipMap.values()) {
        boolean matchesEventHub = ownership.getEventHubName() != null
            && ownership.getEventHubName().equals(this.eventHubName);
        boolean matchesConsumerGroup = ownership.getConsumerGroup() != null
            && ownership.getConsumerGroup().equals(this.consumerGroupName);
        boolean hasRequiredFields = ownership.getPartitionId() != null
            && ownership.getLastModifiedTime() != null
            && ownership.getETag() != null;
        if (!matchesEventHub || !matchesConsumerGroup || !hasRequiredFields) {
            return false;
        }
    }
    return true;
}
/*
* Find the event processor that owns the maximum number of partitions and steal a random partition
* from it.
*/
/*
 * Picks a victim partition to steal: locates the processor that currently owns the
 * most partitions and returns one of its partitions chosen at random.
 */
private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    Map.Entry<String, List<PartitionOwnership>> busiestOwner = ownerPartitionMap.entrySet()
        .stream()
        .max(Comparator.comparingInt(entry -> entry.getValue().size()))
        .get();
    List<PartitionOwnership> ownedPartitions = busiestOwner.getValue();
    LOGGER.atInfo()
        .addKeyValue(OWNER_ID_KEY, ownerId)
        .addKeyValue("ownerWithMaxPartitions", busiestOwner.getKey())
        .log("Stealing a partition from owner that owns max number of partitions.");
    // Random choice spreads steals across the busiest owner's partitions.
    int victimIndex = ThreadLocalRandom.current().nextInt(ownedPartitions.size());
    return ownedPartitions.get(victimIndex).getPartitionId();
}
/*
* When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
* and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
* partition.
*/
/*
 * The load is balanced when every active processor owns either the minimum number of
 * partitions or the minimum plus one, and exactly the expected number of processors
 * hold the extra partition.
 */
private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
    final int numberOfEventProcessorsWithAdditionalPartition,
    final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    // Any processor outside the [min, min + 1] band means the load is skewed.
    boolean anyOutOfBand = ownerPartitionMap.values().stream()
        .anyMatch(owned -> owned.size() < minPartitionsPerEventProcessor
            || owned.size() > minPartitionsPerEventProcessor + 1);
    if (anyOutOfBand) {
        return false;
    }
    long processorsWithExtraPartition = ownerPartitionMap.values().stream()
        .filter(owned -> owned.size() == minPartitionsPerEventProcessor + 1)
        .count();
    return processorsWithExtraPartition == numberOfEventProcessorsWithAdditionalPartition;
}
/*
* This method is called after determining that the load is not balanced. This method will evaluate
* if the current event processor should own more partitions. Specifically, this method returns true if the
* current event processor owns less than the minimum number of partitions or if it owns the minimum number
* and no other event processor owns lesser number of partitions than this event processor.
*/
/*
 * Decides whether this processor should claim another partition: yes if it owns
 * fewer than the minimum, or if it owns the minimum but no other processor owns
 * fewer partitions than it does.
 */
private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
    final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    final int ownedByThisProcessor = ownerPartitionMap.get(this.ownerId).size();
    if (ownedByThisProcessor < minPartitionsPerEventProcessor) {
        return true;
    }
    // ownerPartitionMap always contains this.ownerId, so min() is present.
    final int smallestLoad = ownerPartitionMap.values()
        .stream()
        .mapToInt(List::size)
        .min()
        .getAsInt();
    return ownedByThisProcessor == smallestLoad;
}
/*
* This method will create a new map of partition id and PartitionOwnership containing only those partitions
* that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
* modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
* dead event processors. These will not be included in the map returned by this method.
*/
/*
 * This method will create a new map of partition id and PartitionOwnership containing only those partitions
 * that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
 * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
 * dead event processors. These will not be included in the map returned by this method.
 */
private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
    final Map<String, PartitionOwnership> partitionOwnershipMap) {
    return partitionOwnershipMap
        .entrySet()
        .stream()
        .filter(entry -> {
            // Sample the clock once per entry so the logged age and the
            // active/inactive decision are based on the same instant (the original
            // code sampled it twice and could disagree across the boundary).
            final long ageInMillis = System.currentTimeMillis() - entry.getValue().getLastModifiedTime();
            final boolean isActive = ageInMillis < inactiveTimeLimitInMillis;
            // BUG FIX: the log level used to compare the age in *seconds* against
            // inactiveTimeLimitInMillis (milliseconds), so inactive owners were
            // almost always logged at VERBOSE instead of INFORMATIONAL.
            LOGGER.atLevel(isActive ? LogLevel.VERBOSE : LogLevel.INFORMATIONAL)
                .addKeyValue(PARTITION_ID_KEY, entry.getKey())
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .addKeyValue("partitionOwnerId", entry.getValue().getOwnerId())
                .addKeyValue("modifiedSecondsAgo", ageInMillis / 1000)
                .log("Detecting inactive ownerships.");
            // Records with an empty owner id are "relinquished" and treated as inactive.
            return isActive && !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId());
        }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
}
/**
 * Claims ownership of one new partition and, in the same request, renews all
 * partitions this processor already pumps and owns. On success, starts (or verifies)
 * a partition pump for every claimed partition using its latest checkpoint.
 *
 * @param partitionOwnershipMap latest ownership records keyed by partition id.
 * @param partitionIdToClaim the new partition to claim.
 */
private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) {
    LOGGER.atInfo()
        .addKeyValue(PARTITION_ID_KEY, partitionIdToClaim)
        .addKeyValue(OWNER_ID_KEY, ownerId)
        .log("Attempting to claim ownership of partition.");
    PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
        partitionIdToClaim);
    // Batch the new claim together with renewals for everything we already own.
    List<PartitionOwnership> partitionsToClaim = new ArrayList<>();
    partitionsToClaim.add(ownershipRequest);
    partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps()
        .keySet()
        .stream()
        .filter(
            partitionId -> partitionOwnershipMap.containsKey(partitionId) && partitionOwnershipMap.get(partitionId)
                .getOwnerId().equals(this.ownerId))
        .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
        .collect(Collectors.toList()));
    // Signal the GREEDY loop that another claim attempt may be worthwhile.
    morePartitionsToClaim.set(true);
    checkpointStore
        .claimOwnership(partitionsToClaim)
        .doOnNext(partitionOwnership -> LOGGER.atInfo()
            .addKeyValue(PARTITION_ID_KEY, partitionOwnership.getPartitionId())
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .log("Successfully claimed ownership."))
        .doOnError(ex -> LOGGER
            .atWarning()
            .addKeyValue(PARTITION_ID_KEY, ownershipRequest.getPartitionId())
            .log(Messages.FAILED_TO_CLAIM_OWNERSHIP, ex))
        .collectList()
        // Pair the successfully claimed partitions with their latest checkpoints.
        .zipWhen(ownershipList -> checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName,
            consumerGroupName)
            .collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity()))
        .subscribe(ownedPartitionCheckpointsTuple -> {
            ownedPartitionCheckpointsTuple.getT1()
                .stream()
                .forEach(po -> partitionPumpManager.startPartitionPump(po,
                    ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId())));
        },
        ex -> {
            ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
            processError.accept(errorContext);
            // BALANCED mode releases the cycle guard here; GREEDY relies on the
            // outer pipeline's error handler.
            if (loadBalancingStrategy == LoadBalancingStrategy.BALANCED) {
                isLoadBalancerRunning.set(false);
            }
            throw LOGGER.atError()
                .addKeyValue(PARTITION_ID_KEY, partitionIdToClaim)
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .log(new IllegalStateException("Error while claiming ownership", ex));
        },
        () -> {
            if (loadBalancingStrategy == LoadBalancingStrategy.BALANCED) {
                isLoadBalancerRunning.set(false);
            }
        });
}
/**
 * Logs the current owner-to-partitions distribution, one key/value pair per owner,
 * at VERBOSE level. No-op when VERBOSE logging is disabled.
 *
 * @param ownerPartitionMap ownership records grouped by owner id.
 */
private void logPartitionDistribution(Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    // Building the per-owner CSV strings is wasted work unless VERBOSE is enabled.
    if (!LOGGER.canLogAtLevel(LogLevel.VERBOSE)) {
        return;
    }
    LoggingEventBuilder builder = LOGGER.atVerbose().addKeyValue(OWNER_ID_KEY, ownerId);
    ownerPartitionMap.forEach((owner, ownerships) -> builder.addKeyValue(owner,
        ownerships.stream().map(PartitionOwnership::getPartitionId).collect(Collectors.joining(","))));
    builder.log("Current partition distribution.");
}
/**
 * Builds the ownership request used to claim or renew {@code partitionIdToClaim},
 * reusing the prior record's ETag (when present) for a conditional update.
 *
 * @param partitionOwnershipMap current ownership records keyed by partition id.
 * @param partitionIdToClaim partition to claim or renew.
 * @return the populated request naming this processor as owner.
 */
private PartitionOwnership createPartitionOwnershipRequest(
    final Map<String, PartitionOwnership> partitionOwnershipMap,
    final String partitionIdToClaim) {
    final PartitionOwnership existing = partitionOwnershipMap.get(partitionIdToClaim);
    PartitionOwnership request = new PartitionOwnership()
        .setOwnerId(this.ownerId)
        .setPartitionId(partitionIdToClaim)
        .setFullyQualifiedNamespace(this.fullyQualifiedNamespace)
        .setEventHubName(this.eventHubName)
        .setConsumerGroup(this.consumerGroupName);
    if (existing != null) {
        request.setETag(existing.getETag());
    } else {
        // No prior record: claim unconditionally.
        request.setETag(null);
    }
    return request;
}
} |
Remove this else if and move it to L60. Since this is an else-if for record scenarios for the `DocumentTranslationClient` we aren't adding the recording policy and hence no requests are getting recorded. | DocumentTranslationClient getDTClient(String endpoint, String key) {
DocumentTranslationClientBuilder documentTranslationClientbuilder = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (interceptorManager.isPlaybackMode()) {
documentTranslationClientbuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (!interceptorManager.isLiveMode()) {
interceptorManager.removeSanitizers("AZSDK3430", "AZSDK2030");
} else if (interceptorManager.isRecordMode()) {
documentTranslationClientbuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode()) {
addTestProxySanitizers();
}
documentTranslationClientbuilder.credential(new AzureKeyCredential(key));
return documentTranslationClientbuilder.buildClient();
} | } else if (!interceptorManager.isLiveMode()) { | DocumentTranslationClient getDTClient(String endpoint, String key) {
DocumentTranslationClientBuilder documentTranslationClientbuilder = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (interceptorManager.isPlaybackMode()) {
documentTranslationClientbuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
documentTranslationClientbuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode() && !sanitizersRemoved) {
interceptorManager.removeSanitizers(DISABLE_SANITIZER_LIST);
sanitizersRemoved = true;
}
documentTranslationClientbuilder.credential(new AzureKeyCredential(key));
return documentTranslationClientbuilder.buildClient();
} | class DocumentTranslationClientTestBase extends TestProxyTestBase {
private static final String HOST_NAME_REGEX = "(?<=http:
private static final String REDACTED = "REDACTED";
// No per-test setup beyond the base class's; kept as an explicit extension point.
@Override
public void beforeTest() {
    super.beforeTest();
}
// Convenience factory: builds a client against the mode-appropriate endpoint/key.
DocumentTranslationClient getDocumentTranslationClient() {
    return getDTClient(getEndpoint(), getKey());
}
/**
 * Registers custom test-proxy sanitizers that redact storage-account host names
 * from the recorded request/response bodies (source/target/glossary SAS URLs) and
 * from the {@code Operation-Location} response header.
 */
private void addTestProxySanitizers() {
    final List<TestProxySanitizer> sanitizers = new ArrayList<>();
    // All three body fields carry SAS URLs that embed the storage host name.
    for (String jsonPath : new String[] {"$..sourceUrl", "$..targetUrl", "$..glossaryUrl"}) {
        sanitizers.add(new TestProxySanitizer(jsonPath, HOST_NAME_REGEX, REDACTED,
            TestProxySanitizerType.BODY_KEY));
    }
    sanitizers.add(new TestProxySanitizer("Operation-Location", HOST_NAME_REGEX, REDACTED,
        TestProxySanitizerType.HEADER));
    interceptorManager.addSanitizers(sanitizers);
}
// Convenience factory: builds a single-document client for the current test mode.
SingleDocumentTranslationClient getSingleDocumentTranslationClient() {
    return getSDTClient(getEndpoint(), getKey());
}
/**
 * Builds a {@link SingleDocumentTranslationClient} wired for the current test mode:
 * playback uses the proxy's playback HTTP client, record mode attaches the record
 * policy, and both non-live modes adjust sanitizers.
 *
 * @param endpoint service endpoint (playback placeholder or live endpoint).
 * @param key API key (sanitized placeholder during playback).
 * @return a configured client.
 */
private SingleDocumentTranslationClient getSDTClient(String endpoint, String key) {
    SingleDocumentTranslationClientBuilder singleDocumentTranslationClientbuilder = new SingleDocumentTranslationClientBuilder()
        .endpoint(endpoint)
        .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
    if (interceptorManager.isPlaybackMode()) {
        singleDocumentTranslationClientbuilder.httpClient(interceptorManager.getPlaybackClient());
    } else if (interceptorManager.isRecordMode()) {
        // BUG FIX: this branch was previously unreachable in record mode because a
        // preceding "else if (!isLiveMode())" matched first, so the record policy
        // was never added and no requests were recorded.
        singleDocumentTranslationClientbuilder.addPolicy(interceptorManager.getRecordPolicy());
    }
    if (!interceptorManager.isLiveMode()) {
        // Applies to both playback and record runs.
        interceptorManager.removeSanitizers("AZSDK3430", "AZSDK2030");
        addTestProxySanitizers();
    }
    singleDocumentTranslationClientbuilder.credential(new AzureKeyCredential(key));
    return singleDocumentTranslationClientbuilder.buildClient();
}
private String getEndpoint() {
String playbackEndpoint = "https:
return interceptorManager.isPlaybackMode()
? playbackEndpoint
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_ENDPOINT");
}
/**
 * Returns the translator API key: a sanitized placeholder during playback (no real
 * secret is needed or recorded), otherwise the value from global configuration.
 */
private String getKey() {
    if (interceptorManager.isPlaybackMode()) {
        return "Sanitized";
    }
    return Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_API_KEY");
}
/**
 * Returns the storage account name: a sanitized placeholder during playback,
 * otherwise the value from global configuration.
 */
String getStorageName() {
    if (interceptorManager.isPlaybackMode()) {
        return "Sanitized";
    }
    return Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_STORAGE_NAME");
}
// Returns the storage connection string; playback uses a syntactically valid dummy
// so the blob client builders don't reject it.
private String getConnectionString() {
    return interceptorManager.isPlaybackMode()
        ? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net"
        : Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_CONNECTION_STRING");
}
/**
 * Builds a {@link BlobContainerClient} for the given container, wired for the
 * current test mode (playback client, record policy, sanitizer adjustments).
 *
 * @param containerName name of the blob container.
 * @return a configured container client.
 */
BlobContainerClient getBlobContainerClient(String containerName) {
    BlobContainerClientBuilder blobContainerClientBuilder = new BlobContainerClientBuilder()
        .containerName(containerName)
        .connectionString(getConnectionString());
    if (interceptorManager.isPlaybackMode()) {
        blobContainerClientBuilder.httpClient(interceptorManager.getPlaybackClient());
    } else if (interceptorManager.isRecordMode()) {
        blobContainerClientBuilder.addPolicy(interceptorManager.getRecordPolicy());
    }
    if (!interceptorManager.isLiveMode()) {
        // BUG FIX: removing the default sanitizers previously sat in a trailing
        // "else if (!isLiveMode())" that could never run in playback or record mode.
        interceptorManager.removeSanitizers("AZSDK3430", "AZSDK2030");
        addTestProxySanitizers();
    }
    return blobContainerClientBuilder.buildClient();
}
/**
 * Builds a {@link BlobClient} for the given container/blob, wired for the current
 * test mode (playback client, record policy, sanitizer adjustments).
 *
 * @param containerName name of the blob container.
 * @param blobName name of the blob within the container.
 * @return a configured blob client.
 */
BlobClient getBlobClient(String containerName, String blobName) {
    BlobClientBuilder blobClientBuilder = new BlobClientBuilder()
        .containerName(containerName)
        .connectionString(getConnectionString())
        .blobName(blobName);
    if (interceptorManager.isPlaybackMode()) {
        blobClientBuilder.httpClient(interceptorManager.getPlaybackClient());
    } else if (interceptorManager.isRecordMode()) {
        blobClientBuilder.addPolicy(interceptorManager.getRecordPolicy());
    }
    if (!interceptorManager.isLiveMode()) {
        // BUG FIX: removing the default sanitizers previously sat in a trailing
        // "else if (!isLiveMode())" that could never run in playback or record mode.
        interceptorManager.removeSanitizers("AZSDK3430", "AZSDK2030");
        addTestProxySanitizers();
    }
    return blobClientBuilder.buildClient();
}
// Canonical single-document fixture used by most translation tests.
// NOTE(review): double-brace initialization creates an anonymous ArrayList subclass
// per constant; consider Arrays.asList(...)/List.of(...) if imports allow.
protected static final List<TestDocument> ONE_TEST_DOCUMENTS = new ArrayList<TestDocument>() {
    {
        add(new TestDocument("Document1.txt", "First english test document"));
    }
};
// Two-document fixture for multi-file translation scenarios.
protected static final List<TestDocument> TWO_TEST_DOCUMENTS = new ArrayList<TestDocument>() {
    {
        add(new TestDocument("Document1.txt", "First english test file"));
        add(new TestDocument("File2.txt", "Second english test file"));
    }
};
/**
 * Creates {@code count} throwaway documents named {@code File_<i>.txt}, all with the
 * same placeholder content.
 *
 * @param count number of documents to create.
 * @return a mutable list of generated documents.
 */
public List<TestDocument> createDummyTestDocuments(int count) {
    List<TestDocument> documents = new ArrayList<>(count);
    for (int index = 0; index < count; index++) {
        documents.add(new TestDocument("File_" + index + ".txt", "some random text"));
    }
    return documents;
}
/**
 * Creates a randomly named source container, uploads the given documents, and
 * returns a container SAS URI valid for one hour with read+list permissions.
 *
 * @param documents documents to upload into the container.
 * @return the container URL with an appended SAS token.
 */
String createSourceContainer(List<TestDocument> documents) {
    String containerName = testResourceNamer.randomName("source", 10);
    BlobContainerClient containerClient = createContainer(containerName, documents);
    // The translation service only needs to read and enumerate source blobs.
    BlobContainerSasPermission permissions = new BlobContainerSasPermission()
        .setReadPermission(true)
        .setListPermission(true);
    String sasToken = containerClient.generateSas(
        new BlobServiceSasSignatureValues(OffsetDateTime.now().plusHours(1), permissions));
    return containerClient.getBlobContainerUrl() + "?" + sasToken;
}
/**
 * Creates a randomly named target container (optionally pre-populated) and returns a
 * container SAS URI valid for one hour with write+list permissions.
 *
 * @param documents documents to upload, or an empty/null list for an empty container.
 * @return the container URL with an appended SAS token.
 */
String createTargetContainer(List<TestDocument> documents) {
    String containerName = testResourceNamer.randomName("target", 10);
    BlobContainerClient containerClient = createContainer(containerName, documents);
    // The translation service writes results and lists the target container.
    BlobContainerSasPermission permissions = new BlobContainerSasPermission()
        .setWritePermission(true)
        .setListPermission(true);
    String sasToken = containerClient.generateSas(
        new BlobServiceSasSignatureValues(OffsetDateTime.now().plusHours(1), permissions));
    return containerClient.getBlobContainerUrl() + "?" + sasToken;
}
/**
 * Same as {@code createTargetContainer} but also surfaces the generated container
 * name so callers can read results back after translation.
 *
 * @param documents documents to upload, or an empty/null list for an empty container.
 * @return map with keys {@code "sasUri"} and {@code "containerName"}.
 */
Map<String, String> createTargetContainerWithClient(List<TestDocument> documents) {
    String containerName = testResourceNamer.randomName("target", 10);
    BlobContainerClient containerClient = createContainer(containerName, documents);
    BlobContainerSasPermission permissions = new BlobContainerSasPermission()
        .setWritePermission(true)
        .setListPermission(true);
    String sasToken = containerClient.generateSas(
        new BlobServiceSasSignatureValues(OffsetDateTime.now().plusHours(1), permissions));
    Map<String, String> containerValues = new HashMap<>();
    containerValues.put("sasUri", containerClient.getBlobContainerUrl() + "?" + sasToken);
    containerValues.put("containerName", containerName);
    return containerValues;
}
/**
 * Uploads a single glossary document into a fresh container and returns a SAS URI
 * pointing at that blob (not the container), valid for one hour with read+list
 * permissions.
 *
 * @param document the glossary document to upload.
 * @return the blob URL with an appended SAS token.
 */
String createGlossary(TestDocument document) {
    String containerName = testResourceNamer.randomName("glossary", 10);
    List<TestDocument> documents = new ArrayList<>();
    documents.add(document);
    BlobContainerClient containerClient = createContainer(containerName, documents);
    BlobContainerSasPermission permissions = new BlobContainerSasPermission()
        .setReadPermission(true)
        .setListPermission(true);
    String sasToken = containerClient.generateSas(
        new BlobServiceSasSignatureValues(OffsetDateTime.now().plusHours(1), permissions));
    // Glossary SAS points at the blob itself, not the container root.
    return containerClient.getBlobContainerUrl() + "/" + document.getName() + "?" + sasToken;
}
/**
 * Ensures the named container exists and uploads the given documents into it.
 *
 * @param containerName container to create (if absent).
 * @param documents documents to upload; null or empty uploads nothing.
 * @return the client for the (now existing) container.
 */
BlobContainerClient createContainer(String containerName, List<TestDocument> documents) {
    BlobContainerClient containerClient = getBlobContainerClient(containerName);
    if (!containerClient.exists()) {
        containerClient.create();
    }
    boolean hasDocuments = documents != null && !documents.isEmpty();
    if (hasDocuments) {
        uploadDocuments(containerClient, documents);
    }
    return containerClient;
}
/**
 * Uploads each document's text content as a blob named after the document.
 * NOTE(review): getBytes() uses the platform default charset — confirm UTF-8 is not
 * required for non-ASCII fixtures.
 *
 * @param blobContainerClient destination container.
 * @param documents documents to upload.
 */
private void uploadDocuments(BlobContainerClient blobContainerClient, List<TestDocument> documents) {
    for (TestDocument document : documents) {
        byte[] contentBytes = document.getContent().getBytes();
        blobContainerClient.getBlobClient(document.getName())
            .upload(new ByteArrayInputStream(contentBytes));
    }
}
/**
 * Downloads a blob's content as a single string (lines joined with the platform line
 * separator). Returns an empty string if reading fails — this best-effort contract
 * matches the original behavior.
 *
 * @param containerName container holding the blob.
 * @param blobName blob to download.
 * @return the blob content, or {@code ""} on I/O failure.
 */
String downloadDocumentStream(String containerName, String blobName) {
    BlobClient blobClient = getBlobClient(containerName, blobName);
    // BUG FIX: try-with-resources — the original never closed the blob InputStream,
    // leaking a connection per call.
    try (InputStream blobStream = blobClient.openInputStream()) {
        return readInputStreamToString(blobStream);
    } catch (IOException e) {
        // Preserve the original best-effort contract: report and fall through to "".
        e.printStackTrace();
    }
    return "";
}
/**
 * Reads the stream fully as text, appending the platform line separator after every
 * line (including the last). Closes the reader — and therefore the stream — when done.
 *
 * @param inputStream stream to consume; decoded with the platform default charset.
 * @return the accumulated text.
 * @throws IOException if reading fails.
 */
public static String readInputStreamToString(InputStream inputStream) throws IOException {
    StringBuilder content = new StringBuilder();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream))) {
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
            content.append(line).append(System.lineSeparator());
        }
    }
    return content.toString();
}
} | class DocumentTranslationClientTestBase extends TestProxyTestBase {
// Default test-proxy sanitizers to disable for this suite (AZSDK3430: $..id body
// key, AZSDK2030: Operation-Location header) so recorded values remain usable.
private static final String[] DISABLE_SANITIZER_LIST = {"AZSDK3430", "AZSDK2030"};
// Guards against removing the sanitizers more than once per test instance.
private boolean sanitizersRemoved = false;
// No per-test setup beyond the base class's; kept as an explicit extension point.
@Override
public void beforeTest() {
    super.beforeTest();
}
// Convenience factory: builds a client against the mode-appropriate endpoint/key.
DocumentTranslationClient getDocumentTranslationClient() {
    return getDTClient(getEndpoint(), getKey());
}
// Convenience factory: builds a single-document client for the current test mode.
SingleDocumentTranslationClient getSingleDocumentTranslationClient() {
    return getSDTClient(getEndpoint(), getKey());
}
/**
 * Builds a {@link SingleDocumentTranslationClient} wired for the current test mode:
 * playback uses the proxy's playback HTTP client, record mode attaches the record
 * policy, and non-live modes disable the default sanitizers exactly once.
 *
 * @param endpoint service endpoint (playback placeholder or live endpoint).
 * @param key API key (sanitized placeholder during playback).
 * @return a configured client.
 */
private SingleDocumentTranslationClient getSDTClient(String endpoint, String key) {
    SingleDocumentTranslationClientBuilder singleDocumentTranslationClientbuilder = new SingleDocumentTranslationClientBuilder()
        .endpoint(endpoint)
        .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
    if (interceptorManager.isPlaybackMode()) {
        singleDocumentTranslationClientbuilder.httpClient(interceptorManager.getPlaybackClient());
    } else if (interceptorManager.isRecordMode()) {
        // Record policy must be attached here or nothing gets recorded.
        singleDocumentTranslationClientbuilder.addPolicy(interceptorManager.getRecordPolicy());
    }
    // sanitizersRemoved prevents a duplicate-removal error when several clients are
    // built within one test.
    if (!interceptorManager.isLiveMode() && !sanitizersRemoved) {
        interceptorManager.removeSanitizers(DISABLE_SANITIZER_LIST);
        sanitizersRemoved = true;
    }
    singleDocumentTranslationClientbuilder.credential(new AzureKeyCredential(key));
    return singleDocumentTranslationClientbuilder.buildClient();
}
private String getEndpoint() {
String playbackEndpoint = "https:
return interceptorManager.isPlaybackMode()
? playbackEndpoint
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_ENDPOINT");
}
private String getKey() {
String playbackApiKey = "Sanitized";
return interceptorManager.isPlaybackMode()
? playbackApiKey
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_API_KEY");
}
String getStorageName() {
String playbackStorageName = "Sanitized";
return interceptorManager.isPlaybackMode()
? playbackStorageName
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_STORAGE_NAME");
}
private String getConnectionString() {
return interceptorManager.isPlaybackMode()
? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net"
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_CONNECTION_STRING");
}
BlobContainerClient getBlobContainerClient(String containerName) {
BlobContainerClientBuilder blobContainerClientBuilder = new BlobContainerClientBuilder()
.containerName(containerName)
.connectionString(getConnectionString());
if (interceptorManager.isPlaybackMode()) {
blobContainerClientBuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
blobContainerClientBuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode() && !sanitizersRemoved) {
interceptorManager.removeSanitizers(DISABLE_SANITIZER_LIST);
sanitizersRemoved = true;
}
return blobContainerClientBuilder.buildClient();
}
protected static final List<TestDocument> ONE_TEST_DOCUMENTS = new ArrayList<TestDocument>() {
{
add(new TestDocument("Document1.txt", "First english test document"));
}
};
protected static final List<TestDocument> TWO_TEST_DOCUMENTS = new ArrayList<TestDocument>() {
{
add(new TestDocument("Document1.txt", "First english test file"));
add(new TestDocument("File2.txt", "Second english test file"));
}
};
public List<TestDocument> createDummyTestDocuments(int count) {
List<TestDocument> result = new ArrayList<>();
for (int i = 0; i < count; i++) {
String fileName = "File_" + i + ".txt";
String text = "some random text";
result.add(new TestDocument(fileName, text));
}
return result;
}
String createSourceContainer(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("source", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setReadPermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues
= new BlobServiceSasSignatureValues(expiresOn, containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
return sasUri;
}
String createTargetContainer(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("target", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setWritePermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues
= new BlobServiceSasSignatureValues(expiresOn, containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
return sasUri;
}
Map<String, String> createTargetContainerWithClient(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("target", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setWritePermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues
= new BlobServiceSasSignatureValues(expiresOn, containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
Map<String, String> containerValues = new HashMap<>();
containerValues.put("sasUri", sasUri);
containerValues.put("containerName", containerName);
return containerValues;
}
String createGlossary(TestDocument document) {
String containerName = testResourceNamer.randomName("glossary", 10);
List<TestDocument> documents = new ArrayList<>();
documents.add(document);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setReadPermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues
= new BlobServiceSasSignatureValues(expiresOn, containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "/" + document.getName() + "?" + sasToken;
return sasUri;
}
BlobContainerClient createContainer(String containerName, List<TestDocument> documents) {
BlobContainerClient containerClient = getBlobContainerClient(containerName);
if (!containerClient.exists()) {
containerClient.create();
}
if (documents != null && !documents.isEmpty()) {
uploadDocuments(containerClient, documents);
}
return containerClient;
}
private void uploadDocuments(BlobContainerClient blobContainerClient, List<TestDocument> documents) {
for (TestDocument document : documents) {
InputStream stream = new ByteArrayInputStream(document.getContent().getBytes());
BlobClient blobClient = blobContainerClient.getBlobClient(document.getName());
blobClient.upload(stream);
}
}
String downloadDocumentStream(String targetContainerName, String blobName) {
BlobClientBuilder blobClientBuilder = new BlobClientBuilder()
.containerName(targetContainerName)
.connectionString(getConnectionString())
.blobName(blobName);
if (interceptorManager.isPlaybackMode()) {
blobClientBuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
blobClientBuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode() && !sanitizersRemoved) {
interceptorManager.removeSanitizers(DISABLE_SANITIZER_LIST);
sanitizersRemoved = true;
}
BlobClient blobClient = blobClientBuilder.buildClient();
InputStream blobIS = blobClient.openInputStream();
try {
String content = readInputStreamToString(blobIS);
return content;
} catch (IOException e) {
e.printStackTrace();
}
return "";
}
public static String readInputStreamToString(InputStream inputStream) throws IOException {
StringBuilder stringBuilder = new StringBuilder();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream))) {
String line;
while ((line = reader.readLine()) != null) {
stringBuilder.append(line);
stringBuilder.append(System.lineSeparator());
}
}
return stringBuilder.toString();
}
} |
Moved | DocumentTranslationClient getDTClient(String endpoint, String key) {
DocumentTranslationClientBuilder documentTranslationClientbuilder = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (interceptorManager.isPlaybackMode()) {
documentTranslationClientbuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (!interceptorManager.isLiveMode()) {
interceptorManager.removeSanitizers("AZSDK3430", "AZSDK2030");
} else if (interceptorManager.isRecordMode()) {
documentTranslationClientbuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode()) {
addTestProxySanitizers();
}
documentTranslationClientbuilder.credential(new AzureKeyCredential(key));
return documentTranslationClientbuilder.buildClient();
} | } else if (!interceptorManager.isLiveMode()) { | DocumentTranslationClient getDTClient(String endpoint, String key) {
DocumentTranslationClientBuilder documentTranslationClientbuilder = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (interceptorManager.isPlaybackMode()) {
documentTranslationClientbuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
documentTranslationClientbuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode() && !sanitizersRemoved) {
interceptorManager.removeSanitizers(DISABLE_SANITIZER_LIST);
sanitizersRemoved = true;
}
documentTranslationClientbuilder.credential(new AzureKeyCredential(key));
return documentTranslationClientbuilder.buildClient();
} | class DocumentTranslationClientTestBase extends TestProxyTestBase {
private static final String HOST_NAME_REGEX = "(?<=http:
private static final String REDACTED = "REDACTED";
@Override
public void beforeTest() {
super.beforeTest();
}
DocumentTranslationClient getDocumentTranslationClient() {
return getDTClient(getEndpoint(), getKey());
}
private void addTestProxySanitizers() {
List<TestProxySanitizer> customSanitizers = new ArrayList<>();
customSanitizers.add(
new TestProxySanitizer("$..sourceUrl", HOST_NAME_REGEX, REDACTED, TestProxySanitizerType.BODY_KEY));
customSanitizers.add(
new TestProxySanitizer("$..targetUrl", HOST_NAME_REGEX, REDACTED, TestProxySanitizerType.BODY_KEY));
customSanitizers.add(
new TestProxySanitizer("$..glossaryUrl", HOST_NAME_REGEX, REDACTED, TestProxySanitizerType.BODY_KEY));
customSanitizers.add(
new TestProxySanitizer("Operation-Location", HOST_NAME_REGEX, REDACTED, TestProxySanitizerType.HEADER));
interceptorManager.addSanitizers(customSanitizers);
}
SingleDocumentTranslationClient getSingleDocumentTranslationClient() {
return getSDTClient(getEndpoint(), getKey());
}
private SingleDocumentTranslationClient getSDTClient(String endpoint, String key) {
SingleDocumentTranslationClientBuilder singleDocumentTranslationClientbuilder = new SingleDocumentTranslationClientBuilder()
.endpoint(endpoint)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (interceptorManager.isPlaybackMode()) {
singleDocumentTranslationClientbuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (!interceptorManager.isLiveMode()) {
interceptorManager.removeSanitizers("AZSDK3430", "AZSDK2030");
} else if (interceptorManager.isRecordMode()) {
singleDocumentTranslationClientbuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode()) {
addTestProxySanitizers();
}
singleDocumentTranslationClientbuilder.credential(new AzureKeyCredential(key));
return singleDocumentTranslationClientbuilder.buildClient();
}
private String getEndpoint() {
String playbackEndpoint = "https:
return interceptorManager.isPlaybackMode()
? playbackEndpoint
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_ENDPOINT");
}
private String getKey() {
String playbackApiKey = "Sanitized";
return interceptorManager.isPlaybackMode()
? playbackApiKey
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_API_KEY");
}
String getStorageName() {
String playbackStorageName = "Sanitized";
return interceptorManager.isPlaybackMode()
? playbackStorageName
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_STORAGE_NAME");
}
private String getConnectionString() {
return interceptorManager.isPlaybackMode()
? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net"
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_CONNECTION_STRING");
}
BlobContainerClient getBlobContainerClient(String containerName) {
BlobContainerClientBuilder blobContainerClientBuilder = new BlobContainerClientBuilder()
.containerName(containerName)
.connectionString(getConnectionString());
if (interceptorManager.isPlaybackMode()) {
blobContainerClientBuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
blobContainerClientBuilder.addPolicy(interceptorManager.getRecordPolicy());
} else if (!interceptorManager.isLiveMode()) {
interceptorManager.removeSanitizers("AZSDK3430", "AZSDK2030");
}
if (!interceptorManager.isLiveMode()) {
addTestProxySanitizers();
}
return blobContainerClientBuilder.buildClient();
}
BlobClient getBlobClient(String containerName, String blobName) {
BlobClientBuilder blobClientBuilder = new BlobClientBuilder()
.containerName(containerName)
.connectionString(getConnectionString())
.blobName(blobName);
if (interceptorManager.isPlaybackMode()) {
blobClientBuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
blobClientBuilder.addPolicy(interceptorManager.getRecordPolicy());
} else if (!interceptorManager.isLiveMode()) {
interceptorManager.removeSanitizers("AZSDK3430", "AZSDK2030");
}
if (!interceptorManager.isLiveMode()) {
addTestProxySanitizers();
}
return blobClientBuilder.buildClient();
}
protected static final List<TestDocument> ONE_TEST_DOCUMENTS = new ArrayList<TestDocument>() {
{
add(new TestDocument("Document1.txt", "First english test document"));
}
};
protected static final List<TestDocument> TWO_TEST_DOCUMENTS = new ArrayList<TestDocument>() {
{
add(new TestDocument("Document1.txt", "First english test file"));
add(new TestDocument("File2.txt", "Second english test file"));
}
};
public List<TestDocument> createDummyTestDocuments(int count) {
List<TestDocument> result = new ArrayList<>();
for (int i = 0; i < count; i++) {
String fileName = "File_" + i + ".txt";
String text = "some random text";
result.add(new TestDocument(fileName, text));
}
return result;
}
String createSourceContainer(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("source", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setReadPermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues = new BlobServiceSasSignatureValues(expiresOn,
containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
return sasUri;
}
String createTargetContainer(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("target", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setWritePermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues = new BlobServiceSasSignatureValues(expiresOn,
containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
return sasUri;
}
Map<String, String> createTargetContainerWithClient(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("target", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setWritePermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues = new BlobServiceSasSignatureValues(expiresOn,
containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
Map<String, String> containerValues = new HashMap<>();
containerValues.put("sasUri", sasUri);
containerValues.put("containerName", containerName);
return containerValues;
}
String createGlossary(TestDocument document) {
String containerName = testResourceNamer.randomName("glossary", 10);
List<TestDocument> documents = new ArrayList<>();
documents.add(document);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setReadPermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues = new BlobServiceSasSignatureValues(expiresOn,
containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "/" + document.getName() + "?" + sasToken;
return sasUri;
}
BlobContainerClient createContainer(String containerName, List<TestDocument> documents) {
BlobContainerClient containerClient = getBlobContainerClient(containerName);
if (!containerClient.exists()) {
containerClient.create();
}
if (documents != null && !documents.isEmpty()) {
uploadDocuments(containerClient, documents);
}
return containerClient;
}
private void uploadDocuments(BlobContainerClient blobContainerClient, List<TestDocument> documents) {
for (TestDocument document : documents) {
InputStream stream = new ByteArrayInputStream(document.getContent().getBytes());
BlobClient blobClient = blobContainerClient.getBlobClient(document.getName());
blobClient.upload(stream);
}
}
String downloadDocumentStream(String containerName, String blobName) {
BlobClient blobClient = getBlobClient(containerName, blobName);
InputStream blobIS = blobClient.openInputStream();
try {
String content = readInputStreamToString(blobIS);
return content;
} catch (IOException e) {
e.printStackTrace();
}
return "";
}
public static String readInputStreamToString(InputStream inputStream) throws IOException {
StringBuilder stringBuilder = new StringBuilder();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream))) {
String line;
while ((line = reader.readLine()) != null) {
stringBuilder.append(line);
stringBuilder.append(System.lineSeparator());
}
}
return stringBuilder.toString();
}
} | class DocumentTranslationClientTestBase extends TestProxyTestBase {
private static final String[] DISABLE_SANITIZER_LIST = {"AZSDK3430", "AZSDK2030"};
private boolean sanitizersRemoved = false;
@Override
public void beforeTest() {
super.beforeTest();
}
DocumentTranslationClient getDocumentTranslationClient() {
return getDTClient(getEndpoint(), getKey());
}
SingleDocumentTranslationClient getSingleDocumentTranslationClient() {
return getSDTClient(getEndpoint(), getKey());
}
private SingleDocumentTranslationClient getSDTClient(String endpoint, String key) {
SingleDocumentTranslationClientBuilder singleDocumentTranslationClientbuilder = new SingleDocumentTranslationClientBuilder()
.endpoint(endpoint)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (interceptorManager.isPlaybackMode()) {
singleDocumentTranslationClientbuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
singleDocumentTranslationClientbuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode() && !sanitizersRemoved) {
interceptorManager.removeSanitizers(DISABLE_SANITIZER_LIST);
sanitizersRemoved = true;
}
singleDocumentTranslationClientbuilder.credential(new AzureKeyCredential(key));
return singleDocumentTranslationClientbuilder.buildClient();
}
private String getEndpoint() {
String playbackEndpoint = "https:
return interceptorManager.isPlaybackMode()
? playbackEndpoint
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_ENDPOINT");
}
private String getKey() {
String playbackApiKey = "Sanitized";
return interceptorManager.isPlaybackMode()
? playbackApiKey
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_API_KEY");
}
String getStorageName() {
String playbackStorageName = "Sanitized";
return interceptorManager.isPlaybackMode()
? playbackStorageName
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_STORAGE_NAME");
}
private String getConnectionString() {
return interceptorManager.isPlaybackMode()
? "DefaultEndpointsProtocol=https;AccountName=dummyAccount;AccountKey=xyzDummy;EndpointSuffix=core.windows.net"
: Configuration.getGlobalConfiguration().get("DOCUMENT_TRANSLATION_CONNECTION_STRING");
}
BlobContainerClient getBlobContainerClient(String containerName) {
BlobContainerClientBuilder blobContainerClientBuilder = new BlobContainerClientBuilder()
.containerName(containerName)
.connectionString(getConnectionString());
if (interceptorManager.isPlaybackMode()) {
blobContainerClientBuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
blobContainerClientBuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode() && !sanitizersRemoved) {
interceptorManager.removeSanitizers(DISABLE_SANITIZER_LIST);
sanitizersRemoved = true;
}
return blobContainerClientBuilder.buildClient();
}
protected static final List<TestDocument> ONE_TEST_DOCUMENTS = new ArrayList<TestDocument>() {
{
add(new TestDocument("Document1.txt", "First english test document"));
}
};
protected static final List<TestDocument> TWO_TEST_DOCUMENTS = new ArrayList<TestDocument>() {
{
add(new TestDocument("Document1.txt", "First english test file"));
add(new TestDocument("File2.txt", "Second english test file"));
}
};
public List<TestDocument> createDummyTestDocuments(int count) {
List<TestDocument> result = new ArrayList<>();
for (int i = 0; i < count; i++) {
String fileName = "File_" + i + ".txt";
String text = "some random text";
result.add(new TestDocument(fileName, text));
}
return result;
}
String createSourceContainer(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("source", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setReadPermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues
= new BlobServiceSasSignatureValues(expiresOn, containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
return sasUri;
}
String createTargetContainer(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("target", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setWritePermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues
= new BlobServiceSasSignatureValues(expiresOn, containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
return sasUri;
}
Map<String, String> createTargetContainerWithClient(List<TestDocument> documents) {
String containerName = testResourceNamer.randomName("target", 10);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setWritePermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues
= new BlobServiceSasSignatureValues(expiresOn, containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "?" + sasToken;
Map<String, String> containerValues = new HashMap<>();
containerValues.put("sasUri", sasUri);
containerValues.put("containerName", containerName);
return containerValues;
}
String createGlossary(TestDocument document) {
String containerName = testResourceNamer.randomName("glossary", 10);
List<TestDocument> documents = new ArrayList<>();
documents.add(document);
BlobContainerClient blobContainerClient = createContainer(containerName, documents);
OffsetDateTime expiresOn = OffsetDateTime.now().plusHours(1);
BlobContainerSasPermission containerSasPermission = new BlobContainerSasPermission()
.setReadPermission(true)
.setListPermission(true);
BlobServiceSasSignatureValues serviceSasValues
= new BlobServiceSasSignatureValues(expiresOn, containerSasPermission);
String sasToken = blobContainerClient.generateSas(serviceSasValues);
String containerUrl = blobContainerClient.getBlobContainerUrl();
String sasUri = containerUrl + "/" + document.getName() + "?" + sasToken;
return sasUri;
}
BlobContainerClient createContainer(String containerName, List<TestDocument> documents) {
BlobContainerClient containerClient = getBlobContainerClient(containerName);
if (!containerClient.exists()) {
containerClient.create();
}
if (documents != null && !documents.isEmpty()) {
uploadDocuments(containerClient, documents);
}
return containerClient;
}
private void uploadDocuments(BlobContainerClient blobContainerClient, List<TestDocument> documents) {
for (TestDocument document : documents) {
InputStream stream = new ByteArrayInputStream(document.getContent().getBytes());
BlobClient blobClient = blobContainerClient.getBlobClient(document.getName());
blobClient.upload(stream);
}
}
String downloadDocumentStream(String targetContainerName, String blobName) {
BlobClientBuilder blobClientBuilder = new BlobClientBuilder()
.containerName(targetContainerName)
.connectionString(getConnectionString())
.blobName(blobName);
if (interceptorManager.isPlaybackMode()) {
blobClientBuilder.httpClient(interceptorManager.getPlaybackClient());
} else if (interceptorManager.isRecordMode()) {
blobClientBuilder.addPolicy(interceptorManager.getRecordPolicy());
}
if (!interceptorManager.isLiveMode() && !sanitizersRemoved) {
interceptorManager.removeSanitizers(DISABLE_SANITIZER_LIST);
sanitizersRemoved = true;
}
BlobClient blobClient = blobClientBuilder.buildClient();
InputStream blobIS = blobClient.openInputStream();
try {
String content = readInputStreamToString(blobIS);
return content;
} catch (IOException e) {
e.printStackTrace();
}
return "";
}
public static String readInputStreamToString(InputStream inputStream) throws IOException {
StringBuilder stringBuilder = new StringBuilder();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream))) {
String line;
while ((line = reader.readLine()) != null) {
stringBuilder.append(line);
stringBuilder.append(System.lineSeparator());
}
}
return stringBuilder.toString();
}
} |
We should rather use the `PagedIterable<DocumentStatus> getDocumentsStatus` APIs that give the model directly rather than using the `BinaryData` APIs and using mapper then to convert them. In tests and samples. Especially in samples as they are easier to understand. | public static void main(final String[] args) {
String endpoint = System.getenv("DOCUMENT_TRANSLATION_ENDPOINT");
String apiKey = System.getenv("DOCUMENT_TRANSLATION_API_KEY");
AzureKeyCredential credential = new AzureKeyCredential(apiKey);
DocumentTranslationClient documentTranslationClient = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.credential(credential)
.buildClient();
SyncPoller<TranslationStatus, Void> response
= documentTranslationClient
.beginStartTranslation(
new StartTranslationDetails(Arrays.asList(new BatchRequest(
new SourceInput("https:
.setFilter(new DocumentFilter().setPrefix("pre").setSuffix(".txt"))
.setLanguage("en")
.setStorageSource(StorageSource.AZURE_BLOB),
Arrays
.asList(
new TargetInput("https:
.setCategory("general")
.setGlossaries(Arrays.asList(new Glossary(
"https:
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageSource(StorageSource.AZURE_BLOB),
new TargetInput("https:
.setCategory("general")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageType(StorageInputType.FOLDER))));
String translationId = response.poll().getValue().getId();
List<String> succeededStatusList = Arrays.asList(Status.SUCCEEDED.toString());
RequestOptions requestOptions = new RequestOptions();
requestOptions.addQueryParam("statuses",
succeededStatusList.stream()
.map(paramItemValue -> Objects.toString(paramItemValue, ""))
.collect(Collectors.joining(",")),
false);
try {
PagedIterable<BinaryData> documentStatusResponse = documentTranslationClient.getDocumentsStatus(translationId, requestOptions);
for (BinaryData d: documentStatusResponse) {
String id = new ObjectMapper().readTree(d.toBytes()).get("id").asText();
System.out.println("Document Translation ID is: " + id);
DocumentStatus documentStatus = documentTranslationClient.getDocumentStatus(translationId, id);
System.out.println("Document ID is: " + documentStatus.getId());
System.out.println("Document Status is: " + documentStatus.getStatus().toString());
System.out.println("Characters Charged is: " + documentStatus.getCharacterCharged().toString());
System.out.println("Document path is: " + documentStatus.getPath());
System.out.println("Document source path is: " + documentStatus.getSourcePath());
}
} catch (Exception e) {
System.err.println("An exception occurred: " + e.getMessage());
e.printStackTrace();
}
} | PagedIterable<BinaryData> documentStatusResponse = documentTranslationClient.getDocumentsStatus(translationId, requestOptions); | public static void main(final String[] args) {
String endpoint = System.getenv("DOCUMENT_TRANSLATION_ENDPOINT");
String apiKey = System.getenv("DOCUMENT_TRANSLATION_API_KEY");
AzureKeyCredential credential = new AzureKeyCredential(apiKey);
DocumentTranslationClient documentTranslationClient = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.credential(credential)
.buildClient();
SyncPoller<TranslationStatus, Void> response = documentTranslationClient
.beginStartTranslation(
new StartTranslationDetails(Arrays.asList(new BatchRequest(
new SourceInput("https:
.setFilter(new DocumentFilter().setPrefix("pre").setSuffix(".txt"))
.setLanguage("en")
.setStorageSource(StorageSource.AZURE_BLOB),
Arrays
.asList(
new TargetInput(
"https:
"fr")
.setCategory("general")
.setGlossaries(Arrays.asList(new Glossary(
"https:
"XLIFF")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageSource(StorageSource.AZURE_BLOB),
new TargetInput(
"https:
"es")
.setCategory("general")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageType(StorageInputType.FOLDER))));
String translationId = response.poll().getValue().getId();
List<String> succeededStatusList = Arrays.asList(Status.SUCCEEDED.toString());
try {
PagedIterable<DocumentStatus> documentStatusResponse = documentTranslationClient
.getDocumentsStatus(translationId, null, null, null, succeededStatusList, null, null, null);
for (DocumentStatus documentsStatus : documentStatusResponse) {
String id = documentsStatus.getId();
System.out.println("Document Translation ID is: " + id);
DocumentStatus documentStatus = documentTranslationClient.getDocumentStatus(translationId, id);
System.out.println("Document ID is: " + documentStatus.getId());
System.out.println("Document Status is: " + documentStatus.getStatus().toString());
System.out.println("Characters Charged is: " + documentStatus.getCharacterCharged().toString());
System.out.println("Document path is: " + documentStatus.getPath());
System.out.println("Document source path is: " + documentStatus.getSourcePath());
}
} catch (Exception e) {
System.err.println("An exception occurred: " + e.getMessage());
e.printStackTrace();
}
} | class GetDocumentStatus {
} | class GetDocumentStatus {
} |
Updated in Test code and samples | public static void main(final String[] args) {
String endpoint = System.getenv("DOCUMENT_TRANSLATION_ENDPOINT");
String apiKey = System.getenv("DOCUMENT_TRANSLATION_API_KEY");
AzureKeyCredential credential = new AzureKeyCredential(apiKey);
DocumentTranslationClient documentTranslationClient = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.credential(credential)
.buildClient();
SyncPoller<TranslationStatus, Void> response
= documentTranslationClient
.beginStartTranslation(
new StartTranslationDetails(Arrays.asList(new BatchRequest(
new SourceInput("https:
.setFilter(new DocumentFilter().setPrefix("pre").setSuffix(".txt"))
.setLanguage("en")
.setStorageSource(StorageSource.AZURE_BLOB),
Arrays
.asList(
new TargetInput("https:
.setCategory("general")
.setGlossaries(Arrays.asList(new Glossary(
"https:
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageSource(StorageSource.AZURE_BLOB),
new TargetInput("https:
.setCategory("general")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageType(StorageInputType.FOLDER))));
String translationId = response.poll().getValue().getId();
List<String> succeededStatusList = Arrays.asList(Status.SUCCEEDED.toString());
RequestOptions requestOptions = new RequestOptions();
requestOptions.addQueryParam("statuses",
succeededStatusList.stream()
.map(paramItemValue -> Objects.toString(paramItemValue, ""))
.collect(Collectors.joining(",")),
false);
try {
PagedIterable<BinaryData> documentStatusResponse = documentTranslationClient.getDocumentsStatus(translationId, requestOptions);
for (BinaryData d: documentStatusResponse) {
String id = new ObjectMapper().readTree(d.toBytes()).get("id").asText();
System.out.println("Document Translation ID is: " + id);
DocumentStatus documentStatus = documentTranslationClient.getDocumentStatus(translationId, id);
System.out.println("Document ID is: " + documentStatus.getId());
System.out.println("Document Status is: " + documentStatus.getStatus().toString());
System.out.println("Characters Charged is: " + documentStatus.getCharacterCharged().toString());
System.out.println("Document path is: " + documentStatus.getPath());
System.out.println("Document source path is: " + documentStatus.getSourcePath());
}
} catch (Exception e) {
System.err.println("An exception occurred: " + e.getMessage());
e.printStackTrace();
}
} | PagedIterable<BinaryData> documentStatusResponse = documentTranslationClient.getDocumentsStatus(translationId, requestOptions); | public static void main(final String[] args) {
String endpoint = System.getenv("DOCUMENT_TRANSLATION_ENDPOINT");
String apiKey = System.getenv("DOCUMENT_TRANSLATION_API_KEY");
AzureKeyCredential credential = new AzureKeyCredential(apiKey);
DocumentTranslationClient documentTranslationClient = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.credential(credential)
.buildClient();
SyncPoller<TranslationStatus, Void> response = documentTranslationClient
.beginStartTranslation(
new StartTranslationDetails(Arrays.asList(new BatchRequest(
new SourceInput("https:
.setFilter(new DocumentFilter().setPrefix("pre").setSuffix(".txt"))
.setLanguage("en")
.setStorageSource(StorageSource.AZURE_BLOB),
Arrays
.asList(
new TargetInput(
"https:
"fr")
.setCategory("general")
.setGlossaries(Arrays.asList(new Glossary(
"https:
"XLIFF")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageSource(StorageSource.AZURE_BLOB),
new TargetInput(
"https:
"es")
.setCategory("general")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageType(StorageInputType.FOLDER))));
String translationId = response.poll().getValue().getId();
List<String> succeededStatusList = Arrays.asList(Status.SUCCEEDED.toString());
try {
PagedIterable<DocumentStatus> documentStatusResponse = documentTranslationClient
.getDocumentsStatus(translationId, null, null, null, succeededStatusList, null, null, null);
for (DocumentStatus documentsStatus : documentStatusResponse) {
String id = documentsStatus.getId();
System.out.println("Document Translation ID is: " + id);
DocumentStatus documentStatus = documentTranslationClient.getDocumentStatus(translationId, id);
System.out.println("Document ID is: " + documentStatus.getId());
System.out.println("Document Status is: " + documentStatus.getStatus().toString());
System.out.println("Characters Charged is: " + documentStatus.getCharacterCharged().toString());
System.out.println("Document path is: " + documentStatus.getPath());
System.out.println("Document source path is: " + documentStatus.getSourcePath());
}
} catch (Exception e) {
System.err.println("An exception occurred: " + e.getMessage());
e.printStackTrace();
}
} | class GetDocumentStatus {
} | class GetDocumentStatus {
} |
```suggestion for (DocumentStatus documentStatus: documentStatusResponse) { ``` nit | public static void main(final String[] args) {
String endpoint = System.getenv("DOCUMENT_TRANSLATION_ENDPOINT");
String apiKey = System.getenv("DOCUMENT_TRANSLATION_API_KEY");
AzureKeyCredential credential = new AzureKeyCredential(apiKey);
DocumentTranslationClient documentTranslationClient = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.credential(credential)
.buildClient();
SyncPoller<TranslationStatus, Void> response
= documentTranslationClient
.beginStartTranslation(
new StartTranslationDetails(Arrays.asList(new BatchRequest(
new SourceInput("https:
.setFilter(new DocumentFilter().setPrefix("pre").setSuffix(".txt"))
.setLanguage("en")
.setStorageSource(StorageSource.AZURE_BLOB),
Arrays
.asList(
new TargetInput("https:
.setCategory("general")
.setGlossaries(Arrays.asList(new Glossary(
"https:
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageSource(StorageSource.AZURE_BLOB),
new TargetInput("https:
.setCategory("general")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageType(StorageInputType.FOLDER))));
String translationId = response.poll().getValue().getId();
List<String> succeededStatusList = Arrays.asList(Status.SUCCEEDED.toString());
try {
PagedIterable<DocumentStatus> documentStatusResponse = documentTranslationClient.getDocumentsStatus(translationId, null, null, null, succeededStatusList, null, null, null);
for (DocumentStatus d: documentStatusResponse) {
String id = d.getId();
System.out.println("Document Translation ID is: " + id);
DocumentStatus documentStatus = documentTranslationClient.getDocumentStatus(translationId, id);
System.out.println("Document ID is: " + documentStatus.getId());
System.out.println("Document Status is: " + documentStatus.getStatus().toString());
System.out.println("Characters Charged is: " + documentStatus.getCharacterCharged().toString());
System.out.println("Document path is: " + documentStatus.getPath());
System.out.println("Document source path is: " + documentStatus.getSourcePath());
}
} catch (Exception e) {
System.err.println("An exception occurred: " + e.getMessage());
e.printStackTrace();
}
} | for (DocumentStatus d: documentStatusResponse) { | public static void main(final String[] args) {
String endpoint = System.getenv("DOCUMENT_TRANSLATION_ENDPOINT");
String apiKey = System.getenv("DOCUMENT_TRANSLATION_API_KEY");
AzureKeyCredential credential = new AzureKeyCredential(apiKey);
DocumentTranslationClient documentTranslationClient = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.credential(credential)
.buildClient();
SyncPoller<TranslationStatus, Void> response = documentTranslationClient
.beginStartTranslation(
new StartTranslationDetails(Arrays.asList(new BatchRequest(
new SourceInput("https:
.setFilter(new DocumentFilter().setPrefix("pre").setSuffix(".txt"))
.setLanguage("en")
.setStorageSource(StorageSource.AZURE_BLOB),
Arrays
.asList(
new TargetInput(
"https:
"fr")
.setCategory("general")
.setGlossaries(Arrays.asList(new Glossary(
"https:
"XLIFF")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageSource(StorageSource.AZURE_BLOB),
new TargetInput(
"https:
"es")
.setCategory("general")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageType(StorageInputType.FOLDER))));
String translationId = response.poll().getValue().getId();
List<String> succeededStatusList = Arrays.asList(Status.SUCCEEDED.toString());
try {
PagedIterable<DocumentStatus> documentStatusResponse = documentTranslationClient
.getDocumentsStatus(translationId, null, null, null, succeededStatusList, null, null, null);
for (DocumentStatus documentsStatus : documentStatusResponse) {
String id = documentsStatus.getId();
System.out.println("Document Translation ID is: " + id);
DocumentStatus documentStatus = documentTranslationClient.getDocumentStatus(translationId, id);
System.out.println("Document ID is: " + documentStatus.getId());
System.out.println("Document Status is: " + documentStatus.getStatus().toString());
System.out.println("Characters Charged is: " + documentStatus.getCharacterCharged().toString());
System.out.println("Document path is: " + documentStatus.getPath());
System.out.println("Document source path is: " + documentStatus.getSourcePath());
}
} catch (Exception e) {
System.err.println("An exception occurred: " + e.getMessage());
e.printStackTrace();
}
} | class GetDocumentStatus {
} | class GetDocumentStatus {
} |
Updated | public static void main(final String[] args) {
String endpoint = System.getenv("DOCUMENT_TRANSLATION_ENDPOINT");
String apiKey = System.getenv("DOCUMENT_TRANSLATION_API_KEY");
AzureKeyCredential credential = new AzureKeyCredential(apiKey);
DocumentTranslationClient documentTranslationClient = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.credential(credential)
.buildClient();
SyncPoller<TranslationStatus, Void> response
= documentTranslationClient
.beginStartTranslation(
new StartTranslationDetails(Arrays.asList(new BatchRequest(
new SourceInput("https:
.setFilter(new DocumentFilter().setPrefix("pre").setSuffix(".txt"))
.setLanguage("en")
.setStorageSource(StorageSource.AZURE_BLOB),
Arrays
.asList(
new TargetInput("https:
.setCategory("general")
.setGlossaries(Arrays.asList(new Glossary(
"https:
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageSource(StorageSource.AZURE_BLOB),
new TargetInput("https:
.setCategory("general")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageType(StorageInputType.FOLDER))));
String translationId = response.poll().getValue().getId();
List<String> succeededStatusList = Arrays.asList(Status.SUCCEEDED.toString());
try {
PagedIterable<DocumentStatus> documentStatusResponse = documentTranslationClient.getDocumentsStatus(translationId, null, null, null, succeededStatusList, null, null, null);
for (DocumentStatus d: documentStatusResponse) {
String id = d.getId();
System.out.println("Document Translation ID is: " + id);
DocumentStatus documentStatus = documentTranslationClient.getDocumentStatus(translationId, id);
System.out.println("Document ID is: " + documentStatus.getId());
System.out.println("Document Status is: " + documentStatus.getStatus().toString());
System.out.println("Characters Charged is: " + documentStatus.getCharacterCharged().toString());
System.out.println("Document path is: " + documentStatus.getPath());
System.out.println("Document source path is: " + documentStatus.getSourcePath());
}
} catch (Exception e) {
System.err.println("An exception occurred: " + e.getMessage());
e.printStackTrace();
}
} | for (DocumentStatus d: documentStatusResponse) { | public static void main(final String[] args) {
String endpoint = System.getenv("DOCUMENT_TRANSLATION_ENDPOINT");
String apiKey = System.getenv("DOCUMENT_TRANSLATION_API_KEY");
AzureKeyCredential credential = new AzureKeyCredential(apiKey);
DocumentTranslationClient documentTranslationClient = new DocumentTranslationClientBuilder()
.endpoint(endpoint)
.credential(credential)
.buildClient();
SyncPoller<TranslationStatus, Void> response = documentTranslationClient
.beginStartTranslation(
new StartTranslationDetails(Arrays.asList(new BatchRequest(
new SourceInput("https:
.setFilter(new DocumentFilter().setPrefix("pre").setSuffix(".txt"))
.setLanguage("en")
.setStorageSource(StorageSource.AZURE_BLOB),
Arrays
.asList(
new TargetInput(
"https:
"fr")
.setCategory("general")
.setGlossaries(Arrays.asList(new Glossary(
"https:
"XLIFF")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageSource(StorageSource.AZURE_BLOB),
new TargetInput(
"https:
"es")
.setCategory("general")
.setStorageSource(StorageSource.AZURE_BLOB)))
.setStorageType(StorageInputType.FOLDER))));
String translationId = response.poll().getValue().getId();
List<String> succeededStatusList = Arrays.asList(Status.SUCCEEDED.toString());
try {
PagedIterable<DocumentStatus> documentStatusResponse = documentTranslationClient
.getDocumentsStatus(translationId, null, null, null, succeededStatusList, null, null, null);
for (DocumentStatus documentsStatus : documentStatusResponse) {
String id = documentsStatus.getId();
System.out.println("Document Translation ID is: " + id);
DocumentStatus documentStatus = documentTranslationClient.getDocumentStatus(translationId, id);
System.out.println("Document ID is: " + documentStatus.getId());
System.out.println("Document Status is: " + documentStatus.getStatus().toString());
System.out.println("Characters Charged is: " + documentStatus.getCharacterCharged().toString());
System.out.println("Document path is: " + documentStatus.getPath());
System.out.println("Document source path is: " + documentStatus.getSourcePath());
}
} catch (Exception e) {
System.err.println("An exception occurred: " + e.getMessage());
e.printStackTrace();
}
} | class GetDocumentStatus {
} | class GetDocumentStatus {
} |
use the `username` var from line 41 rather than extracting again? User name won't ever change as long as the same identity is being used | public static void main(String[] args) {
DefaultAzureCredential defaultAzureCredential = new DefaultAzureCredentialBuilder().build();
TokenRequestContext trc = new TokenRequestContext().addScopes("https:
TokenRefreshCache tokenRefreshCache = new TokenRefreshCache(defaultAzureCredential, trc);
AccessToken accessToken = tokenRefreshCache.getAccessToken();
boolean useSsl = true;
String cacheHostname = "<HOST_NAME>";
String username = extractUsernameFromToken(accessToken.getToken());
Jedis jedis = createJedisClient(cacheHostname, 6380, username, accessToken, useSsl);
tokenRefreshCache
.setJedisInstanceToAuthenticate(jedis);
int maxTries = 3;
int i = 0;
while (i < maxTries) {
try {
jedis.set("Az:key", "testValue");
System.out.println(jedis.get("Az:key"));
break;
} catch (JedisException e) {
e.printStackTrace();
if (jedis.isBroken()) {
jedis.close();
accessToken = tokenRefreshCache.getAccessToken();
jedis = createJedisClient(cacheHostname, 6380, extractUsernameFromToken(accessToken.getToken()), accessToken, useSsl);
tokenRefreshCache
.setJedisInstanceToAuthenticate(jedis);
}
}
i++;
}
jedis.close();
} | jedis = createJedisClient(cacheHostname, 6380, extractUsernameFromToken(accessToken.getToken()), accessToken, useSsl); | public static void main(String[] args) {
DefaultAzureCredential defaultAzureCredential = new DefaultAzureCredentialBuilder().build();
TokenRequestContext trc = new TokenRequestContext().addScopes("https:
TokenRefreshCache tokenRefreshCache = new TokenRefreshCache(defaultAzureCredential, trc);
AccessToken accessToken = tokenRefreshCache.getAccessToken();
boolean useSsl = true;
String cacheHostname = "<HOST_NAME>";
String username = extractUsernameFromToken(accessToken.getToken());
Jedis jedis = createJedisClient(cacheHostname, 6380, username, accessToken, useSsl);
tokenRefreshCache
.setJedisInstanceToAuthenticate(jedis);
int maxTries = 3;
int i = 0;
while (i < maxTries) {
try {
jedis.set("Az:key", "testValue");
System.out.println(jedis.get("Az:key"));
break;
} catch (JedisException e) {
e.printStackTrace();
if (jedis.isBroken()) {
jedis.close();
accessToken = tokenRefreshCache.getAccessToken();
jedis = createJedisClient(cacheHostname, 6380, username, accessToken, useSsl);
tokenRefreshCache
.setJedisInstanceToAuthenticate(jedis);
}
}
i++;
}
jedis.close();
} | class AuthenticateWithTokenCache {
private static Jedis createJedisClient(String cacheHostname, int port, String username, AccessToken accessToken, boolean useSsl) {
return new Jedis(cacheHostname, port, DefaultJedisClientConfig.builder()
.password(accessToken.getToken())
.user(username)
.ssl(useSsl)
.build());
}
private static String extractUsernameFromToken(String token) {
String[] parts = token.split("\\.");
String base64 = parts[1];
switch (base64.length() % 4) {
case 2:
base64 += "==";
break;
case 3:
base64 += "=";
break;
}
byte[] jsonBytes = Base64.getDecoder().decode(base64);
String json = new String(jsonBytes, StandardCharsets.UTF_8);
JsonObject jwt = JsonParser.parseString(json).getAsJsonObject();
return jwt.get("oid").getAsString();
}
/**
* The token cache to store and proactively refresh the access token.
*/
public static class TokenRefreshCache {
private final TokenCredential tokenCredential;
private final TokenRequestContext tokenRequestContext;
private final Timer timer;
private volatile AccessToken accessToken;
private final Duration maxRefreshOffset = Duration.ofMinutes(5);
private final Duration baseRefreshOffset = Duration.ofMinutes(2);
private Jedis jedisInstanceToAuthenticate;
private String username;
/**
* Creates an instance of TokenRefreshCache
* @param tokenCredential the token credential to be used for authentication.
* @param tokenRequestContext the token request context to be used for authentication.
*/
public TokenRefreshCache(TokenCredential tokenCredential, TokenRequestContext tokenRequestContext) {
this.tokenCredential = tokenCredential;
this.tokenRequestContext = tokenRequestContext;
this.timer = new Timer(true);
}
/**
* Gets the cached access token.
* @return the {@link AccessToken}
*/
public AccessToken getAccessToken() {
if (accessToken != null) {
return accessToken;
} else {
TokenRefreshTask tokenRefreshTask = new TokenRefreshTask();
accessToken = tokenCredential.getToken(tokenRequestContext).block();
timer.schedule(tokenRefreshTask, getTokenRefreshDelay());
return accessToken;
}
}
private class TokenRefreshTask extends TimerTask {
public void run() {
accessToken = tokenCredential.getToken(tokenRequestContext).block();
username = extractUsernameFromToken(accessToken.getToken());
System.out.println("Refreshed Token with Expiry: " + accessToken.getExpiresAt().toEpochSecond());
if (jedisInstanceToAuthenticate != null && !CoreUtils.isNullOrEmpty(username)) {
jedisInstanceToAuthenticate.auth(username, accessToken.getToken());
System.out.println("Refreshed Jedis Connection with fresh access token, token expires at : "
+ accessToken.getExpiresAt().toEpochSecond());
}
timer.schedule(new TokenRefreshTask(), getTokenRefreshDelay());
}
}
private long getTokenRefreshDelay() {
return ((accessToken.getExpiresAt()
.minusSeconds(ThreadLocalRandom.current().nextLong(baseRefreshOffset.getSeconds(), maxRefreshOffset.getSeconds()))
.toEpochSecond() - OffsetDateTime.now().toEpochSecond()) * 1000);
}
/**
* Sets the Jedis to proactively authenticate before token expiry.
* @param jedisInstanceToAuthenticate the instance to authenticate
* @return the updated instance
*/
public TokenRefreshCache setJedisInstanceToAuthenticate(Jedis jedisInstanceToAuthenticate) {
this.jedisInstanceToAuthenticate = jedisInstanceToAuthenticate;
return this;
}
}
} | class AuthenticateWithTokenCache {
private static Jedis createJedisClient(String cacheHostname, int port, String username, AccessToken accessToken, boolean useSsl) {
return new Jedis(cacheHostname, port, DefaultJedisClientConfig.builder()
.password(accessToken.getToken())
.user(username)
.ssl(useSsl)
.build());
}
private static String extractUsernameFromToken(String token) {
String[] parts = token.split("\\.");
String base64 = parts[1];
switch (base64.length() % 4) {
case 2:
base64 += "==";
break;
case 3:
base64 += "=";
break;
}
byte[] jsonBytes = Base64.getDecoder().decode(base64);
String json = new String(jsonBytes, StandardCharsets.UTF_8);
JsonObject jwt = JsonParser.parseString(json).getAsJsonObject();
return jwt.get("oid").getAsString();
}
/**
* The token cache to store and proactively refresh the access token.
*/
public static class TokenRefreshCache {
private final TokenCredential tokenCredential;
private final TokenRequestContext tokenRequestContext;
private final Timer timer;
private volatile AccessToken accessToken;
private final Duration maxRefreshOffset = Duration.ofMinutes(5);
private final Duration baseRefreshOffset = Duration.ofMinutes(2);
private Jedis jedisInstanceToAuthenticate;
private String username;
/**
* Creates an instance of TokenRefreshCache
* @param tokenCredential the token credential to be used for authentication.
* @param tokenRequestContext the token request context to be used for authentication.
*/
public TokenRefreshCache(TokenCredential tokenCredential, TokenRequestContext tokenRequestContext) {
this.tokenCredential = tokenCredential;
this.tokenRequestContext = tokenRequestContext;
this.timer = new Timer(true);
}
/**
* Gets the cached access token.
* @return the {@link AccessToken}
*/
public AccessToken getAccessToken() {
if (accessToken != null) {
return accessToken;
} else {
TokenRefreshTask tokenRefreshTask = new TokenRefreshTask();
accessToken = tokenCredential.getToken(tokenRequestContext).block();
timer.schedule(tokenRefreshTask, getTokenRefreshDelay());
return accessToken;
}
}
private class TokenRefreshTask extends TimerTask {
public void run() {
accessToken = tokenCredential.getToken(tokenRequestContext).block();
username = extractUsernameFromToken(accessToken.getToken());
System.out.println("Refreshed Token with Expiry: " + accessToken.getExpiresAt().toEpochSecond());
if (jedisInstanceToAuthenticate != null && !CoreUtils.isNullOrEmpty(username)) {
jedisInstanceToAuthenticate.auth(username, accessToken.getToken());
System.out.println("Refreshed Jedis Connection with fresh access token, token expires at : "
+ accessToken.getExpiresAt().toEpochSecond());
}
timer.schedule(new TokenRefreshTask(), getTokenRefreshDelay());
}
}
private long getTokenRefreshDelay() {
return ((accessToken.getExpiresAt()
.minusSeconds(ThreadLocalRandom.current().nextLong(baseRefreshOffset.getSeconds(), maxRefreshOffset.getSeconds()))
.toEpochSecond() - OffsetDateTime.now().toEpochSecond()) * 1000);
}
/**
* Sets the Jedis to proactively authenticate before token expiry.
* @param jedisInstanceToAuthenticate the instance to authenticate
* @return the updated instance
*/
public TokenRefreshCache setJedisInstanceToAuthenticate(Jedis jedisInstanceToAuthenticate) {
this.jedisInstanceToAuthenticate = jedisInstanceToAuthenticate;
return this;
}
}
} |
updated to reuse username | public static void main(String[] args) {
DefaultAzureCredential defaultAzureCredential = new DefaultAzureCredentialBuilder().build();
TokenRequestContext trc = new TokenRequestContext().addScopes("https:
TokenRefreshCache tokenRefreshCache = new TokenRefreshCache(defaultAzureCredential, trc);
AccessToken accessToken = tokenRefreshCache.getAccessToken();
boolean useSsl = true;
String cacheHostname = "<HOST_NAME>";
String username = extractUsernameFromToken(accessToken.getToken());
Jedis jedis = createJedisClient(cacheHostname, 6380, username, accessToken, useSsl);
tokenRefreshCache
.setJedisInstanceToAuthenticate(jedis);
int maxTries = 3;
int i = 0;
while (i < maxTries) {
try {
jedis.set("Az:key", "testValue");
System.out.println(jedis.get("Az:key"));
break;
} catch (JedisException e) {
e.printStackTrace();
if (jedis.isBroken()) {
jedis.close();
accessToken = tokenRefreshCache.getAccessToken();
jedis = createJedisClient(cacheHostname, 6380, extractUsernameFromToken(accessToken.getToken()), accessToken, useSsl);
tokenRefreshCache
.setJedisInstanceToAuthenticate(jedis);
}
}
i++;
}
jedis.close();
} | jedis = createJedisClient(cacheHostname, 6380, extractUsernameFromToken(accessToken.getToken()), accessToken, useSsl); | public static void main(String[] args) {
DefaultAzureCredential defaultAzureCredential = new DefaultAzureCredentialBuilder().build();
TokenRequestContext trc = new TokenRequestContext().addScopes("https:
TokenRefreshCache tokenRefreshCache = new TokenRefreshCache(defaultAzureCredential, trc);
AccessToken accessToken = tokenRefreshCache.getAccessToken();
boolean useSsl = true;
String cacheHostname = "<HOST_NAME>";
String username = extractUsernameFromToken(accessToken.getToken());
Jedis jedis = createJedisClient(cacheHostname, 6380, username, accessToken, useSsl);
tokenRefreshCache
.setJedisInstanceToAuthenticate(jedis);
int maxTries = 3;
int i = 0;
while (i < maxTries) {
try {
jedis.set("Az:key", "testValue");
System.out.println(jedis.get("Az:key"));
break;
} catch (JedisException e) {
e.printStackTrace();
if (jedis.isBroken()) {
jedis.close();
accessToken = tokenRefreshCache.getAccessToken();
jedis = createJedisClient(cacheHostname, 6380, username, accessToken, useSsl);
tokenRefreshCache
.setJedisInstanceToAuthenticate(jedis);
}
}
i++;
}
jedis.close();
} | class AuthenticateWithTokenCache {
private static Jedis createJedisClient(String cacheHostname, int port, String username, AccessToken accessToken, boolean useSsl) {
return new Jedis(cacheHostname, port, DefaultJedisClientConfig.builder()
.password(accessToken.getToken())
.user(username)
.ssl(useSsl)
.build());
}
private static String extractUsernameFromToken(String token) {
String[] parts = token.split("\\.");
String base64 = parts[1];
switch (base64.length() % 4) {
case 2:
base64 += "==";
break;
case 3:
base64 += "=";
break;
}
byte[] jsonBytes = Base64.getDecoder().decode(base64);
String json = new String(jsonBytes, StandardCharsets.UTF_8);
JsonObject jwt = JsonParser.parseString(json).getAsJsonObject();
return jwt.get("oid").getAsString();
}
/**
* The token cache to store and proactively refresh the access token.
*/
public static class TokenRefreshCache {
private final TokenCredential tokenCredential;
private final TokenRequestContext tokenRequestContext;
private final Timer timer;
private volatile AccessToken accessToken;
private final Duration maxRefreshOffset = Duration.ofMinutes(5);
private final Duration baseRefreshOffset = Duration.ofMinutes(2);
private Jedis jedisInstanceToAuthenticate;
private String username;
/**
* Creates an instance of TokenRefreshCache
* @param tokenCredential the token credential to be used for authentication.
* @param tokenRequestContext the token request context to be used for authentication.
*/
public TokenRefreshCache(TokenCredential tokenCredential, TokenRequestContext tokenRequestContext) {
this.tokenCredential = tokenCredential;
this.tokenRequestContext = tokenRequestContext;
this.timer = new Timer(true);
}
/**
* Gets the cached access token.
* @return the {@link AccessToken}
*/
public AccessToken getAccessToken() {
if (accessToken != null) {
return accessToken;
} else {
TokenRefreshTask tokenRefreshTask = new TokenRefreshTask();
accessToken = tokenCredential.getToken(tokenRequestContext).block();
timer.schedule(tokenRefreshTask, getTokenRefreshDelay());
return accessToken;
}
}
private class TokenRefreshTask extends TimerTask {
public void run() {
accessToken = tokenCredential.getToken(tokenRequestContext).block();
username = extractUsernameFromToken(accessToken.getToken());
System.out.println("Refreshed Token with Expiry: " + accessToken.getExpiresAt().toEpochSecond());
if (jedisInstanceToAuthenticate != null && !CoreUtils.isNullOrEmpty(username)) {
jedisInstanceToAuthenticate.auth(username, accessToken.getToken());
System.out.println("Refreshed Jedis Connection with fresh access token, token expires at : "
+ accessToken.getExpiresAt().toEpochSecond());
}
timer.schedule(new TokenRefreshTask(), getTokenRefreshDelay());
}
}
private long getTokenRefreshDelay() {
return ((accessToken.getExpiresAt()
.minusSeconds(ThreadLocalRandom.current().nextLong(baseRefreshOffset.getSeconds(), maxRefreshOffset.getSeconds()))
.toEpochSecond() - OffsetDateTime.now().toEpochSecond()) * 1000);
}
/**
* Sets the Jedis to proactively authenticate before token expiry.
* @param jedisInstanceToAuthenticate the instance to authenticate
* @return the updated instance
*/
public TokenRefreshCache setJedisInstanceToAuthenticate(Jedis jedisInstanceToAuthenticate) {
this.jedisInstanceToAuthenticate = jedisInstanceToAuthenticate;
return this;
}
}
} | class AuthenticateWithTokenCache {
private static Jedis createJedisClient(String cacheHostname, int port, String username, AccessToken accessToken, boolean useSsl) {
return new Jedis(cacheHostname, port, DefaultJedisClientConfig.builder()
.password(accessToken.getToken())
.user(username)
.ssl(useSsl)
.build());
}
private static String extractUsernameFromToken(String token) {
String[] parts = token.split("\\.");
String base64 = parts[1];
switch (base64.length() % 4) {
case 2:
base64 += "==";
break;
case 3:
base64 += "=";
break;
}
byte[] jsonBytes = Base64.getDecoder().decode(base64);
String json = new String(jsonBytes, StandardCharsets.UTF_8);
JsonObject jwt = JsonParser.parseString(json).getAsJsonObject();
return jwt.get("oid").getAsString();
}
/**
* The token cache to store and proactively refresh the access token.
*/
public static class TokenRefreshCache {
private final TokenCredential tokenCredential;
private final TokenRequestContext tokenRequestContext;
private final Timer timer;
private volatile AccessToken accessToken;
private final Duration maxRefreshOffset = Duration.ofMinutes(5);
private final Duration baseRefreshOffset = Duration.ofMinutes(2);
private Jedis jedisInstanceToAuthenticate;
private String username;
/**
* Creates an instance of TokenRefreshCache
* @param tokenCredential the token credential to be used for authentication.
* @param tokenRequestContext the token request context to be used for authentication.
*/
public TokenRefreshCache(TokenCredential tokenCredential, TokenRequestContext tokenRequestContext) {
this.tokenCredential = tokenCredential;
this.tokenRequestContext = tokenRequestContext;
this.timer = new Timer(true);
}
/**
* Gets the cached access token.
* @return the {@link AccessToken}
*/
public AccessToken getAccessToken() {
if (accessToken != null) {
return accessToken;
} else {
TokenRefreshTask tokenRefreshTask = new TokenRefreshTask();
accessToken = tokenCredential.getToken(tokenRequestContext).block();
timer.schedule(tokenRefreshTask, getTokenRefreshDelay());
return accessToken;
}
}
private class TokenRefreshTask extends TimerTask {
public void run() {
accessToken = tokenCredential.getToken(tokenRequestContext).block();
username = extractUsernameFromToken(accessToken.getToken());
System.out.println("Refreshed Token with Expiry: " + accessToken.getExpiresAt().toEpochSecond());
if (jedisInstanceToAuthenticate != null && !CoreUtils.isNullOrEmpty(username)) {
jedisInstanceToAuthenticate.auth(username, accessToken.getToken());
System.out.println("Refreshed Jedis Connection with fresh access token, token expires at : "
+ accessToken.getExpiresAt().toEpochSecond());
}
timer.schedule(new TokenRefreshTask(), getTokenRefreshDelay());
}
}
private long getTokenRefreshDelay() {
return ((accessToken.getExpiresAt()
.minusSeconds(ThreadLocalRandom.current().nextLong(baseRefreshOffset.getSeconds(), maxRefreshOffset.getSeconds()))
.toEpochSecond() - OffsetDateTime.now().toEpochSecond()) * 1000);
}
/**
* Sets the Jedis to proactively authenticate before token expiry.
* @param jedisInstanceToAuthenticate the instance to authenticate
* @return the updated instance
*/
public TokenRefreshCache setJedisInstanceToAuthenticate(Jedis jedisInstanceToAuthenticate) {
this.jedisInstanceToAuthenticate = jedisInstanceToAuthenticate;
return this;
}
}
} |
no point in logging warning here, we'll log error on line 505 anyway | private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) {
LOGGER.atInfo()
.addKeyValue(PARTITION_ID_KEY, partitionIdToClaim)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Attempting to claim ownership of partition.");
PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
partitionIdToClaim);
List<PartitionOwnership> partitionsToClaim = new ArrayList<>();
partitionsToClaim.add(ownershipRequest);
partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps()
.keySet()
.stream()
.filter(
partitionId -> partitionOwnershipMap.containsKey(partitionId) && partitionOwnershipMap.get(partitionId)
.getOwnerId().equals(this.ownerId))
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()));
morePartitionsToClaim.set(true);
checkpointStore
.claimOwnership(partitionsToClaim)
.doOnNext(partitionOwnership -> LOGGER.atInfo()
.addKeyValue(PARTITION_ID_KEY, partitionOwnership.getPartitionId())
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Successfully claimed ownership."))
.doOnError(ex -> LOGGER
.atWarning()
.addKeyValue(PARTITION_ID_KEY, ownershipRequest.getPartitionId())
.log(Messages.FAILED_TO_CLAIM_OWNERSHIP, ex))
.collectList()
.zipWhen(ownershipList -> checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName,
consumerGroupName)
.collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity()))
.subscribe(ownedPartitionCheckpointsTuple -> {
ownedPartitionCheckpointsTuple.getT1()
.stream()
.forEach(po -> partitionPumpManager.startPartitionPump(po,
ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId())));
},
ex -> {
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
if (loadBalancingStrategy == LoadBalancingStrategy.BALANCED) {
isLoadBalancerRunning.set(false);
}
throw LOGGER.atError()
.addKeyValue(PARTITION_ID_KEY, partitionIdToClaim)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log(new IllegalStateException("Error while claiming ownership", ex));
},
() -> {
if (loadBalancingStrategy == LoadBalancingStrategy.BALANCED) {
isLoadBalancerRunning.set(false);
}
});
} | .stream() | private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) {
LOGGER.atInfo()
.addKeyValue(PARTITION_ID_KEY, partitionIdToClaim)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Attempting to claim ownership of partition.");
PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
partitionIdToClaim);
List<PartitionOwnership> partitionsToClaim = new ArrayList<>();
partitionsToClaim.add(ownershipRequest);
partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps()
.keySet()
.stream()
.filter(
partitionId -> partitionOwnershipMap.containsKey(partitionId) && partitionOwnershipMap.get(partitionId)
.getOwnerId().equals(this.ownerId))
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()));
morePartitionsToClaim.set(true);
checkpointStore
.claimOwnership(partitionsToClaim)
.doOnNext(partitionOwnership -> LOGGER.atInfo()
.addKeyValue(PARTITION_ID_KEY, partitionOwnership.getPartitionId())
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Successfully claimed ownership."))
.doOnError(ex -> LOGGER
.atWarning()
.addKeyValue(PARTITION_ID_KEY, ownershipRequest.getPartitionId())
.log(Messages.FAILED_TO_CLAIM_OWNERSHIP, ex))
.collectList()
.zipWhen(ownershipList -> checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName,
consumerGroupName)
.collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity()))
.subscribe(ownedPartitionCheckpointsTuple -> {
ownedPartitionCheckpointsTuple.getT1()
.stream()
.forEach(po -> partitionPumpManager.startPartitionPump(po,
ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId())));
},
ex -> {
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
if (loadBalancingStrategy == LoadBalancingStrategy.BALANCED) {
isLoadBalancerRunning.set(false);
}
throw LOGGER.atError()
.addKeyValue(PARTITION_ID_KEY, partitionIdToClaim)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log(new IllegalStateException("Error while claiming ownership", ex));
},
() -> {
if (loadBalancingStrategy == LoadBalancingStrategy.BALANCED) {
isLoadBalancerRunning.set(false);
}
});
} | class PartitionBasedLoadBalancer {
private static final ClientLogger LOGGER = new ClientLogger(PartitionBasedLoadBalancer.class);
private final String eventHubName;
private final String consumerGroupName;
private final CheckpointStore checkpointStore;
private final EventHubAsyncClient eventHubAsyncClient;
private final String ownerId;
private final long inactiveTimeLimitInMillis;
private final PartitionPumpManager partitionPumpManager;
private final String fullyQualifiedNamespace;
private final Consumer<ErrorContext> processError;
private final PartitionContext partitionAgnosticContext;
private final AtomicBoolean isLoadBalancerRunning = new AtomicBoolean();
private final LoadBalancingStrategy loadBalancingStrategy;
private final AtomicBoolean morePartitionsToClaim = new AtomicBoolean();
private final AtomicReference<List<String>> partitionsCache = new AtomicReference<>(new ArrayList<>());
/**
* Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
* @param checkpointStore The partition manager that this load balancer will use to read/update ownership details.
* @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
* @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with.
* @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with.
* @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer.
* @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
* assuming the owner of the partition is inactive.
* @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
* that this {@link EventProcessorClient} is processing.
* @param processError The callback that will be called when an error occurs while running the load balancer.
* @param loadBalancingStrategy The load balancing strategy to use.
*/
PartitionBasedLoadBalancer(final CheckpointStore checkpointStore,
final EventHubAsyncClient eventHubAsyncClient, final String fullyQualifiedNamespace,
final String eventHubName, final String consumerGroupName, final String ownerId,
final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager,
final Consumer<ErrorContext> processError, LoadBalancingStrategy loadBalancingStrategy) {
this.checkpointStore = checkpointStore;
this.eventHubAsyncClient = eventHubAsyncClient;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.eventHubName = eventHubName;
this.consumerGroupName = consumerGroupName;
this.ownerId = ownerId;
this.inactiveTimeLimitInMillis = TimeUnit.SECONDS
.toMillis(inactiveTimeLimitInSeconds);
this.partitionPumpManager = partitionPumpManager;
this.processError = processError;
this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName,
consumerGroupName, "NONE");
this.loadBalancingStrategy = loadBalancingStrategy;
}
/**
* This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
* EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient}
* owning <b>at most one</b> new partition.
* <p>
* The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
* EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
* this algorithm converges gradually towards a steady state.
* </p>
* When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
* {@link EventHubConsumerAsyncClient} for processing events from that partition.
*/
void loadBalance() {
if (!isLoadBalancerRunning.compareAndSet(false, true)) {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Load balancer already running.");
return;
}
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Starting load balancer.");
/*
* Retrieve current partition ownership details from the datastore.
*/
final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore
.listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName)
.timeout(Duration.ofMinutes(1))
.collectMap(PartitionOwnership::getPartitionId, Function.identity());
/*
* Retrieve the list of partition ids from the Event Hub.
*/
Mono<List<String>> partitionsMono;
if (CoreUtils.isNullOrEmpty(partitionsCache.get())) {
LOGGER.atInfo()
.addKeyValue(ENTITY_PATH_KEY, eventHubName)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Getting partitions from Event Hubs service.");
partitionsMono = eventHubAsyncClient
.getPartitionIds()
.timeout(Duration.ofMinutes(1))
.collectList();
} else {
partitionsMono = Mono.just(partitionsCache.get());
closeClient();
}
Mono.zip(partitionOwnershipMono, partitionsMono)
.flatMap(this::loadBalance)
.then()
.repeat(() -> LoadBalancingStrategy.GREEDY == loadBalancingStrategy && morePartitionsToClaim.get())
.subscribe(ignored -> { },
ex -> {
LOGGER.atWarning()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log(Messages.LOAD_BALANCING_FAILED, ex);
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
isLoadBalancerRunning.set(false);
morePartitionsToClaim.set(false);
},
() -> LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Load balancing completed successfully."));
}
/*
* This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
* current Event Processor should take on the responsibility of processing more partitions.
*/
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
return Mono.fromRunnable(() -> {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Starting next iteration of load balancer.");
Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
List<String> partitionIds = tuple.getT2();
if (CoreUtils.isNullOrEmpty(partitionIds)) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(
new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
}
partitionsCache.set(partitionIds);
int numberOfPartitions = partitionIds.size();
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("numberOfPartitions", numberOfPartitions)
.addKeyValue("ownershipRecords", partitionOwnershipMap.size())
.log("Load balancing.");
if (!isValid(partitionOwnershipMap)) {
throw LOGGER.logExceptionAsError(Exceptions.propagate(
new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
}
/*
* Remove all partitions' ownership that have not been modified for a configuration period of time. This
* means that the previous EventProcessor that owned the partition is probably down and the partition is now
* eligible to be claimed by other EventProcessors.
*/
Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
partitionOwnershipMap);
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("activeRecords", activePartitionOwnershipMap.size())
.log("Found active ownership records.");
/*
* Create a map of owner id and a list of partitions it owns
*/
Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
.stream()
.collect(
Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));
ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
logPartitionDistribution(ownerPartitionMap);
if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
/*
* If the active partition ownership map is empty, this is the first time an event processor is
* running or all Event Processors are down for this Event Hub, consumer group combination. All
* partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
*/
claimOwnership(partitionOwnershipMap, partitionIds.get(ThreadLocalRandom.current()
.nextInt(numberOfPartitions)));
return;
}
/*
* Find the minimum number of partitions every event processor should own when the load is
* evenly distributed.
*/
int numberOfActiveEventProcessors = ownerPartitionMap.size();
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("numberOfProcessors", ownerPartitionMap.size())
.log("Found active event processors.");
int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
/*
* If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
* a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
* Calculate the number of event processors that can own additional partition.
*/
int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("minPartitionsPerEventProcessor", minPartitionsPerEventProcessor)
.addKeyValue("eventProcessorsWithAdditionalPartition",
numberOfEventProcessorsWithAdditionalPartition)
.log("Calculated number of event processors that can own additional partition.");
if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
ownerPartitionMap)) {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
.log("Load is balanced for this event processor.");
renewOwnership(partitionOwnershipMap);
return;
}
if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
.log("This event processor shouldn't own more partitions");
renewOwnership(partitionOwnershipMap);
return;
}
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
.log("Load is unbalanced and this event processor should own more partitions");
/*
* If some partitions are unclaimed, this could be because an event processor is down and
* it's partitions are now available for others to own or because event processors are just
* starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
* Find any partition that is not actively owned and claim it.
*
* OR
*
* Find a partition to steal from another event processor. Pick the event processor that has owns the
* highest number of partitions.
*/
String partitionToClaim = partitionIds.parallelStream()
.filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
.findAny()
.orElseGet(() -> {
LOGGER.atInfo()
.addKeyValue("partitionCount", 0)
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("No unclaimed partitions, stealing from another event processor");
return findPartitionToSteal(ownerPartitionMap);
});
claimOwnership(partitionOwnershipMap, partitionToClaim);
});
}
/*
* Closes the client used by load balancer to get the partitions.
*/
private void closeClient() {
try {
this.eventHubAsyncClient.close();
} catch (Exception ex) {
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Failed to close the client", ex);
}
}
/*
* This method renews the ownership of currently owned partitions
*/
private void renewOwnership(Map<String, PartitionOwnership> partitionOwnershipMap) {
morePartitionsToClaim.set(false);
checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
.stream()
.filter(
partitionId -> partitionOwnershipMap.containsKey(partitionId) && partitionOwnershipMap.get(partitionId)
.getOwnerId().equals(this.ownerId))
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()))
.subscribe(partitionPumpManager::verifyPartitionConnection,
ex -> {
LOGGER.atError()
.addKeyValue(OWNER_ID_KEY, ownerId)
.log("Error renewing partition ownership", ex);
isLoadBalancerRunning.set(false);
},
() -> isLoadBalancerRunning.set(false));
}
/*
* Check if partition ownership data is valid before proceeding with load balancing.
*/
private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
return partitionOwnershipMap.values()
.stream()
.noneMatch(partitionOwnership -> {
return partitionOwnership.getEventHubName() == null
|| !partitionOwnership.getEventHubName().equals(this.eventHubName)
|| partitionOwnership.getConsumerGroup() == null
|| !partitionOwnership.getConsumerGroup().equals(this.consumerGroupName)
|| partitionOwnership.getPartitionId() == null
|| partitionOwnership.getLastModifiedTime() == null
|| partitionOwnership.getETag() == null;
});
}
/*
* Find the event processor that owns the maximum number of partitions and steal a random partition
* from it.
*/
private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
.stream()
.max(Comparator.comparingInt(entry -> entry.getValue().size()))
.get();
int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
LOGGER.atInfo()
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("ownerWithMaxPartitions", ownerWithMaxPartitions.getKey())
.log("Stealing a partition from owner that owns max number of partitions.");
return ownerWithMaxPartitions.getValue().get(ThreadLocalRandom.current().nextInt(numberOfPartitions))
.getPartitionId();
}
/*
* When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
* and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
* partition.
*/
private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
final int numberOfEventProcessorsWithAdditionalPartition,
final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
int count = 0;
for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
int numberOfPartitions = partitionOwnership.size();
if (numberOfPartitions < minPartitionsPerEventProcessor
|| numberOfPartitions > minPartitionsPerEventProcessor + 1) {
return false;
}
if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
count++;
}
}
return count == numberOfEventProcessorsWithAdditionalPartition;
}
/*
* This method is called after determining that the load is not balanced. This method will evaluate
* if the current event processor should own more partitions. Specifically, this method returns true if the
* current event processor owns less than the minimum number of partitions or if it owns the minimum number
* and no other event processor owns lesser number of partitions than this event processor.
*/
private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
int leastPartitionsOwnedByAnyEventProcessor =
ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
return numberOfPartitionsOwned < minPartitionsPerEventProcessor
|| numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
}
/*
* This method will create a new map of partition id and PartitionOwnership containing only those partitions
* that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
* modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
* dead event processors. These will not be included in the map returned by this method.
*/
private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
final Map<String, PartitionOwnership> partitionOwnershipMap) {
return partitionOwnershipMap
.entrySet()
.stream()
.filter(entry -> {
long diff = (System.currentTimeMillis() - entry.getValue().getLastModifiedTime()) / 1000;
LOGGER.atLevel((diff < inactiveTimeLimitInMillis) ? LogLevel.VERBOSE : LogLevel.INFORMATIONAL)
.addKeyValue(PARTITION_ID_KEY, entry.getKey())
.addKeyValue(OWNER_ID_KEY, ownerId)
.addKeyValue("partitionOwnerId", entry.getValue().getOwnerId())
.addKeyValue("modifiedSecondsAgo", diff)
.log("Detecting inactive ownerships.");
return (System.currentTimeMillis() - entry.getValue().getLastModifiedTime() < inactiveTimeLimitInMillis)
&& !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId());
}).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
}
private void logPartitionDistribution(Map<String, List<PartitionOwnership>> ownerPartitionMap) {
if (LOGGER.canLogAtLevel(LogLevel.VERBOSE)) {
LoggingEventBuilder log = LOGGER.atVerbose()
.addKeyValue(OWNER_ID_KEY, ownerId);
for (Entry<String, List<PartitionOwnership>> entry : ownerPartitionMap.entrySet()) {
log.addKeyValue(entry.getKey(), entry.getValue().stream()
.map(po -> po.getPartitionId()).collect(Collectors.joining(",")));
}
log.log("Current partition distribution.");
}
}
private PartitionOwnership createPartitionOwnershipRequest(
final Map<String, PartitionOwnership> partitionOwnershipMap,
final String partitionIdToClaim) {
PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
.setFullyQualifiedNamespace(this.fullyQualifiedNamespace)
.setOwnerId(this.ownerId)
.setPartitionId(partitionIdToClaim)
.setConsumerGroup(this.consumerGroupName)
.setEventHubName(this.eventHubName)
.setETag(previousPartitionOwnership == null ? null : previousPartitionOwnership.getETag());
return partitionOwnershipRequest;
}
} | class PartitionBasedLoadBalancer {
private static final ClientLogger LOGGER = new ClientLogger(PartitionBasedLoadBalancer.class);
private final String eventHubName;
private final String consumerGroupName;
private final CheckpointStore checkpointStore;
private final EventHubAsyncClient eventHubAsyncClient;
private final String ownerId;
private final long inactiveTimeLimitInMillis;
private final PartitionPumpManager partitionPumpManager;
private final String fullyQualifiedNamespace;
private final Consumer<ErrorContext> processError;
private final PartitionContext partitionAgnosticContext;
private final AtomicBoolean isLoadBalancerRunning = new AtomicBoolean();
private final LoadBalancingStrategy loadBalancingStrategy;
private final AtomicBoolean morePartitionsToClaim = new AtomicBoolean();
private final AtomicReference<List<String>> partitionsCache = new AtomicReference<>(new ArrayList<>());
/**
* Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
* @param checkpointStore The partition manager that this load balancer will use to read/update ownership details.
* @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
* @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with.
* @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with.
* @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer.
* @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
* assuming the owner of the partition is inactive.
* @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
* that this {@link EventProcessorClient} is processing.
* @param processError The callback that will be called when an error occurs while running the load balancer.
* @param loadBalancingStrategy The load balancing strategy to use.
*/
PartitionBasedLoadBalancer(final CheckpointStore checkpointStore,
final EventHubAsyncClient eventHubAsyncClient, final String fullyQualifiedNamespace,
final String eventHubName, final String consumerGroupName, final String ownerId,
final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager,
final Consumer<ErrorContext> processError, LoadBalancingStrategy loadBalancingStrategy) {
this.checkpointStore = checkpointStore;
this.eventHubAsyncClient = eventHubAsyncClient;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.eventHubName = eventHubName;
this.consumerGroupName = consumerGroupName;
this.ownerId = ownerId;
this.inactiveTimeLimitInMillis = TimeUnit.SECONDS
.toMillis(inactiveTimeLimitInSeconds);
this.partitionPumpManager = partitionPumpManager;
this.processError = processError;
this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName,
consumerGroupName, "NONE");
this.loadBalancingStrategy = loadBalancingStrategy;
}
/**
* This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
* EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient}
* owning <b>at most one</b> new partition.
* <p>
* The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
* EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
* this algorithm converges gradually towards a steady state.
* </p>
* When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
* {@link EventHubConsumerAsyncClient} for processing events from that partition.
*/
void loadBalance() {
    // Single-flight guard: only one load-balancing cycle may run at a time. The flag is
    // reset in the subscribe error handler or by renewOwnership()'s completion callbacks.
    if (!isLoadBalancerRunning.compareAndSet(false, true)) {
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .log("Load balancer already running.");
        return;
    }
    LOGGER.atInfo()
        .addKeyValue(OWNER_ID_KEY, ownerId)
        .log("Starting load balancer.");
    /*
     * Retrieve current partition ownership details from the datastore.
     */
    final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore
        .listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName)
        .timeout(Duration.ofMinutes(1))
        .collectMap(PartitionOwnership::getPartitionId, Function.identity());
    /*
     * Retrieve the list of partition ids from the Event Hub. The service is queried only until the
     * partition ids have been cached; afterwards the cached list is reused.
     */
    Mono<List<String>> partitionsMono;
    if (CoreUtils.isNullOrEmpty(partitionsCache.get())) {
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, eventHubName)
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .log("Getting partitions from Event Hubs service.");
        partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofMinutes(1))
            .collectList();
    } else {
        partitionsMono = Mono.just(partitionsCache.get());
        // The client is only needed to fetch partition ids; once they are cached it is released.
        // NOTE(review): closeClient() is reached on every subsequent cycle — it swallows failures,
        // but confirm the client's close() is idempotent.
        closeClient();
    }
    // With GREEDY balancing the cycle repeats immediately while there are still partitions to claim;
    // with BALANCED it runs once per invocation.
    Mono.zip(partitionOwnershipMono, partitionsMono)
        .flatMap(this::loadBalance)
        .then()
        .repeat(() -> LoadBalancingStrategy.GREEDY == loadBalancingStrategy && morePartitionsToClaim.get())
        .subscribe(ignored -> { },
            ex -> {
                // Surface the failure to the user's error handler and allow the next cycle to start.
                LOGGER.atWarning()
                    .addKeyValue(OWNER_ID_KEY, ownerId)
                    .log(Messages.LOAD_BALANCING_FAILED, ex);
                ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
                processError.accept(errorContext);
                isLoadBalancerRunning.set(false);
                morePartitionsToClaim.set(false);
            },
            () -> LOGGER.atInfo()
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .log("Load balancing completed successfully."));
}
/*
* This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
* current Event Processor should take on the responsibility of processing more partitions.
*/
/**
 * Runs one balancing iteration over the current ownership records (tuple T1) and the Event Hub's
 * partition ids (tuple T2). Claims at most one partition per iteration, or renews existing
 * ownership when the load is already balanced.
 */
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    return Mono.fromRunnable(() -> {
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .log("Starting next iteration of load balancer.");
        Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
        List<String> partitionIds = tuple.getT2();
        if (CoreUtils.isNullOrEmpty(partitionIds)) {
            throw LOGGER.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
        }
        // Cache the ids so later cycles can skip the service round trip (see loadBalance()).
        partitionsCache.set(partitionIds);
        int numberOfPartitions = partitionIds.size();
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("numberOfPartitions", numberOfPartitions)
            .addKeyValue("ownershipRecords", partitionOwnershipMap.size())
            .log("Load balancing.");
        if (!isValid(partitionOwnershipMap)) {
            throw LOGGER.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
        }
        /*
         * Remove all partitions' ownership that have not been modified for a configuration period of time. This
         * means that the previous EventProcessor that owned the partition is probably down and the partition is now
         * eligible to be claimed by other EventProcessors.
         */
        Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
            partitionOwnershipMap);
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("activeRecords", activePartitionOwnershipMap.size())
            .log("Found active ownership records.");
        /*
         * Create a map of owner id and a list of partitions it owns
         */
        Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
            .stream()
            .collect(
                Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));
        // Ensure this processor is represented even when it owns nothing yet; the min/steal logic
        // below relies on this entry being present.
        ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
        logPartitionDistribution(ownerPartitionMap);
        if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
            /*
             * If the active partition ownership map is empty, this is the first time an event processor is
             * running or all Event Processors are down for this Event Hub, consumer group combination. All
             * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
             */
            claimOwnership(partitionOwnershipMap, partitionIds.get(ThreadLocalRandom.current()
                .nextInt(numberOfPartitions)));
            return;
        }
        /*
         * Find the minimum number of partitions every event processor should own when the load is
         * evenly distributed.
         */
        int numberOfActiveEventProcessors = ownerPartitionMap.size();
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("numberOfProcessors", ownerPartitionMap.size())
            .log("Found active event processors.");
        int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
        /*
         * If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
         * a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
         * Calculate the number of event processors that can own additional partition.
         */
        int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("minPartitionsPerEventProcessor", minPartitionsPerEventProcessor)
            .addKeyValue("eventProcessorsWithAdditionalPartition",
                numberOfEventProcessorsWithAdditionalPartition)
            .log("Calculated number of event processors that can own additional partition.");
        if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
            ownerPartitionMap)) {
            LOGGER.atInfo()
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
                .log("Load is balanced for this event processor.")
;
            renewOwnership(partitionOwnershipMap);
            return;
        }
        if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
            LOGGER.atInfo()
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
                .log("This event processor shouldn't own more partitions");
            renewOwnership(partitionOwnershipMap);
            return;
        }
        LOGGER.atInfo()
            .addKeyValue(OWNER_ID_KEY, ownerId)
            .addKeyValue("partitionCount", ownerPartitionMap.get(ownerId).size())
            .log("Load is unbalanced and this event processor should own more partitions");
        /*
         * If some partitions are unclaimed, this could be because an event processor is down and
         * it's partitions are now available for others to own or because event processors are just
         * starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
         * Find any partition that is not actively owned and claim it.
         *
         * OR
         *
         * Find a partition to steal from another event processor. Pick the event processor that has owns the
         * highest number of partitions.
         */
        String partitionToClaim = partitionIds.parallelStream()
            .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
            .findAny()
            .orElseGet(() -> {
                LOGGER.atInfo()
                    .addKeyValue("partitionCount", 0)
                    .addKeyValue(OWNER_ID_KEY, ownerId)
                    .log("No unclaimed partitions, stealing from another event processor");
                return findPartitionToSteal(ownerPartitionMap);
            });
        claimOwnership(partitionOwnershipMap, partitionToClaim);
    });
}
/*
* Closes the client used by load balancer to get the partitions.
*/
/**
 * Releases the {@link EventHubAsyncClient} used for fetching partition ids.
 * Any failure to close is logged at warning level and swallowed; load balancing continues regardless.
 */
private void closeClient() {
    try {
        this.eventHubAsyncClient.close();
    } catch (Exception e) {
        LOGGER.atWarning().addKeyValue(OWNER_ID_KEY, ownerId).log("Failed to close the client", e);
    }
}
/*
* This method renews the ownership of currently owned partitions
*/
/*
 * This method renews the ownership of currently owned partitions. Only partitions that have a pump
 * running locally AND whose latest ownership record still names this processor as owner are renewed.
 * Both the error and completion callbacks release the load-balancer-running flag so the next cycle
 * can start.
 */
private void renewOwnership(Map<String, PartitionOwnership> partitionOwnershipMap) {
    morePartitionsToClaim.set(false);
    checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
        .stream()
        .filter(
            partitionId -> partitionOwnershipMap.containsKey(partitionId) && partitionOwnershipMap.get(partitionId)
                .getOwnerId().equals(this.ownerId))
        .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
        .collect(Collectors.toList()))
        .subscribe(partitionPumpManager::verifyPartitionConnection,
            ex -> {
                LOGGER.atError()
                    .addKeyValue(OWNER_ID_KEY, ownerId)
                    .log("Error renewing partition ownership", ex);
                isLoadBalancerRunning.set(false);
            },
            () -> isLoadBalancerRunning.set(false));
}
/*
* Check if partition ownership data is valid before proceeding with load balancing.
*/
/*
 * Validates the ownership records returned by the CheckpointStore before load balancing proceeds.
 * A record is acceptable only when it targets this Event Hub and consumer group and carries a
 * partition id, last-modified time, and ETag. Returns false as soon as one bad record is found.
 */
private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
    for (PartitionOwnership ownership : partitionOwnershipMap.values()) {
        boolean invalid = ownership.getEventHubName() == null
            || !ownership.getEventHubName().equals(this.eventHubName)
            || ownership.getConsumerGroup() == null
            || !ownership.getConsumerGroup().equals(this.consumerGroupName)
            || ownership.getPartitionId() == null
            || ownership.getLastModifiedTime() == null
            || ownership.getETag() == null;
        if (invalid) {
            return false;
        }
    }
    return true;
}
/*
* Find the event processor that owns the maximum number of partitions and steal a random partition
* from it.
*/
/*
 * Picks a partition to steal: locate the processor owning the most partitions and return the id of
 * one of its partitions chosen at random. Callers only invoke this when every partition is actively
 * owned, so the map is never empty.
 */
private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    Map.Entry<String, List<PartitionOwnership>> busiestOwner = ownerPartitionMap.entrySet()
        .stream()
        .max(Map.Entry.comparingByValue(Comparator.comparingInt(List::size)))
        .get();
    List<PartitionOwnership> candidates = busiestOwner.getValue();
    LOGGER.atInfo()
        .addKeyValue(OWNER_ID_KEY, ownerId)
        .addKeyValue("ownerWithMaxPartitions", busiestOwner.getKey())
        .log("Stealing a partition from owner that owns max number of partitions.");
    int randomIndex = ThreadLocalRandom.current().nextInt(candidates.size());
    return candidates.get(randomIndex).getPartitionId();
}
/*
* When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
* and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
* partition.
*/
/*
 * The load is balanced when every active processor owns either the minimum number of partitions or
 * exactly one more, and the number of processors carrying the extra partition equals the expected
 * remainder (numberOfPartitions % numberOfProcessors).
 */
private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
    final int numberOfEventProcessorsWithAdditionalPartition,
    final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    boolean everyOwnerWithinBounds = ownerPartitionMap.values()
        .stream()
        .mapToInt(List::size)
        .allMatch(owned -> owned >= minPartitionsPerEventProcessor
            && owned <= minPartitionsPerEventProcessor + 1);
    if (!everyOwnerWithinBounds) {
        return false;
    }
    long ownersWithExtraPartition = ownerPartitionMap.values()
        .stream()
        .mapToInt(List::size)
        .filter(owned -> owned == minPartitionsPerEventProcessor + 1)
        .count();
    return ownersWithExtraPartition == numberOfEventProcessorsWithAdditionalPartition;
}
/*
* This method is called after determining that the load is not balanced. This method will evaluate
* if the current event processor should own more partitions. Specifically, this method returns true if the
* current event processor owns less than the minimum number of partitions or if it owns the minimum number
* and no other event processor owns lesser number of partitions than this event processor.
*/
/*
 * Decides whether this processor should claim another partition. True when it owns fewer than the
 * minimum, or when it owns the minimum but no other processor owns fewer than it does (i.e. it is
 * tied for the lightest load). The map always contains an entry for this.ownerId, inserted by the
 * caller, so the minimum search never runs on an empty collection.
 */
private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
    final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    int ownedByThisProcessor = ownerPartitionMap.get(this.ownerId).size();
    if (ownedByThisProcessor < minPartitionsPerEventProcessor) {
        return true;
    }
    int smallestOwnedCount = Integer.MAX_VALUE;
    for (List<PartitionOwnership> owned : ownerPartitionMap.values()) {
        smallestOwnedCount = Math.min(smallestOwnedCount, owned.size());
    }
    return ownedByThisProcessor == smallestOwnedCount;
}
/*
* This method will create a new map of partition id and PartitionOwnership containing only those partitions
* that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
* modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
* dead event processors. These will not be included in the map returned by this method.
*/
/*
 * This method will create a new map of partition id and PartitionOwnership containing only those partitions
 * that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
 * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
 * dead event processors. These will not be included in the map returned by this method.
 *
 * BUG FIX: the previous implementation divided the elapsed time by 1000 (seconds) and then compared
 * that value against inactiveTimeLimitInMillis (milliseconds) when choosing the log level, so stale
 * records were logged at VERBOSE long after they had expired. It also sampled
 * System.currentTimeMillis() twice, letting the logged level and the filter decision disagree.
 * Both comparisons now use a single millisecond snapshot taken once per invocation.
 */
private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
    final Map<String, PartitionOwnership> partitionOwnershipMap) {
    final long now = System.currentTimeMillis();
    return partitionOwnershipMap
        .entrySet()
        .stream()
        .filter(entry -> {
            // Elapsed time since the record was last renewed, in milliseconds.
            long elapsedMillis = now - entry.getValue().getLastModifiedTime();
            // Active records are routine (VERBOSE); expired ones are worth surfacing (INFORMATIONAL).
            LOGGER.atLevel((elapsedMillis < inactiveTimeLimitInMillis) ? LogLevel.VERBOSE : LogLevel.INFORMATIONAL)
                .addKeyValue(PARTITION_ID_KEY, entry.getKey())
                .addKeyValue(OWNER_ID_KEY, ownerId)
                .addKeyValue("partitionOwnerId", entry.getValue().getOwnerId())
                .addKeyValue("modifiedSecondsAgo", elapsedMillis / 1000)
                .log("Detecting inactive ownerships.");
            // Keep only records renewed within the inactivity window that still name an owner;
            // a blank owner id marks a deliberately relinquished partition.
            return elapsedMillis < inactiveTimeLimitInMillis
                && !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId());
        }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
}
/*
 * Emits a single VERBOSE log line listing, per owner id, the comma-separated partitions that owner
 * holds. Skips all string building when VERBOSE logging is disabled.
 */
private void logPartitionDistribution(Map<String, List<PartitionOwnership>> ownerPartitionMap) {
    if (!LOGGER.canLogAtLevel(LogLevel.VERBOSE)) {
        return;
    }
    LoggingEventBuilder log = LOGGER.atVerbose().addKeyValue(OWNER_ID_KEY, ownerId);
    for (Entry<String, List<PartitionOwnership>> entry : ownerPartitionMap.entrySet()) {
        String ownedPartitions = entry.getValue().stream()
            .map(PartitionOwnership::getPartitionId)
            .collect(Collectors.joining(","));
        log.addKeyValue(entry.getKey(), ownedPartitions);
    }
    log.log("Current partition distribution.");
}
/*
 * Builds the ownership record used to claim (or renew) a partition for this processor. The ETag of
 * the previous record, when one exists, is carried over so the CheckpointStore can perform an
 * optimistic-concurrency check; a null ETag claims a previously unowned partition.
 */
private PartitionOwnership createPartitionOwnershipRequest(
    final Map<String, PartitionOwnership> partitionOwnershipMap,
    final String partitionIdToClaim) {
    PartitionOwnership previousOwnership = partitionOwnershipMap.get(partitionIdToClaim);
    String previousETag = (previousOwnership == null) ? null : previousOwnership.getETag();
    return new PartitionOwnership()
        .setFullyQualifiedNamespace(this.fullyQualifiedNamespace)
        .setOwnerId(this.ownerId)
        .setPartitionId(partitionIdToClaim)
        .setConsumerGroup(this.consumerGroupName)
        .setEventHubName(this.eventHubName)
        .setETag(previousETag);
}
} |
Tests are failing as the format for this class in Swagger doesn't match what the service actually returns, which is why it had the custom deserializer for Jackson. The service returns: ```xml <Blobs> <BlobPrefix>...</BlobPrefix> <Blob>...</Blob> <BlobPrefix>...</BlobPrefix> </Blobs> ``` But there isn't a good way in Swagger to define "this returns a list that can have one of two types in it, intermixed". So, we'll want to add a code generation customization to change the `toXml` and `fromXml` methods in this class. This should become ```java if (this.blobPrefixes != null) { for (BlobPrefix element : this.blobPrefixes) { xmlWriter.writeXml(element, "BlobPrefix"); } } if (this.blobItems != null) { for (BlobItemInternal element : this.blobItems) { xmlWriter.writeXml(element, "Blob"); } } ``` | public XmlWriter toXml(XmlWriter xmlWriter, String rootElementName) throws XMLStreamException {
rootElementName = CoreUtils.isNullOrEmpty(rootElementName) ? "Blobs" : rootElementName;
xmlWriter.writeStartElement(rootElementName);
if (this.blobPrefixes != null) {
xmlWriter.writeStartElement("BlobPrefixes");
for (BlobPrefix element : this.blobPrefixes) {
xmlWriter.writeXml(element, "BlobPrefix");
}
xmlWriter.writeEndElement();
}
if (this.blobItems != null) {
xmlWriter.writeStartElement("BlobItems");
for (BlobItemInternal element : this.blobItems) {
xmlWriter.writeXml(element, "Blob");
}
xmlWriter.writeEndElement();
}
return xmlWriter.writeEndElement();
} | } | public XmlWriter toXml(XmlWriter xmlWriter, String rootElementName) throws XMLStreamException {
rootElementName = CoreUtils.isNullOrEmpty(rootElementName) ? "Blobs" : rootElementName;
xmlWriter.writeStartElement(rootElementName);
if (this.blobPrefixes != null) {
for (BlobPrefix element : this.blobPrefixes) {
xmlWriter.writeXml(element, "BlobPrefix");
}
}
if (this.blobItems != null) {
for (BlobItemInternal element : this.blobItems) {
xmlWriter.writeXml(element, "Blob");
}
}
return xmlWriter.writeEndElement();
} | class BlobHierarchyListSegment implements XmlSerializable<BlobHierarchyListSegment> {
/*
* The BlobPrefixes property.
*/
private List<BlobPrefix> blobPrefixes = new ArrayList<>();
/*
* The BlobItems property.
*/
private List<BlobItemInternal> blobItems = new ArrayList<>();
/**
* Creates an instance of BlobHierarchyListSegment class.
*/
public BlobHierarchyListSegment() {
}
/**
* Get the blobPrefixes property: The BlobPrefixes property.
*
* @return the blobPrefixes value.
*/
public List<BlobPrefix> getBlobPrefixes() {
return this.blobPrefixes;
}
/**
* Set the blobPrefixes property: The BlobPrefixes property.
*
* @param blobPrefixes the blobPrefixes value to set.
* @return the BlobHierarchyListSegment object itself.
*/
public BlobHierarchyListSegment setBlobPrefixes(List<BlobPrefix> blobPrefixes) {
this.blobPrefixes = blobPrefixes;
return this;
}
/**
* Get the blobItems property: The BlobItems property.
*
* @return the blobItems value.
*/
public List<BlobItemInternal> getBlobItems() {
return this.blobItems;
}
/**
* Set the blobItems property: The BlobItems property.
*
* @param blobItems the blobItems value to set.
* @return the BlobHierarchyListSegment object itself.
*/
public BlobHierarchyListSegment setBlobItems(List<BlobItemInternal> blobItems) {
this.blobItems = blobItems;
return this;
}
@Override
public XmlWriter toXml(XmlWriter xmlWriter) throws XMLStreamException {
return toXml(xmlWriter, null);
}
@Override
/**
* Reads an instance of BlobHierarchyListSegment from the XmlReader.
*
* @param xmlReader The XmlReader being read.
* @return An instance of BlobHierarchyListSegment if the XmlReader was pointing to an instance of it, or null if it
* was pointing to XML null.
* @throws IllegalStateException If the deserialized XML object was missing any required properties.
* @throws XMLStreamException If an error occurs while reading the BlobHierarchyListSegment.
*/
public static BlobHierarchyListSegment fromXml(XmlReader xmlReader) throws XMLStreamException {
return fromXml(xmlReader, null);
}
/**
* Reads an instance of BlobHierarchyListSegment from the XmlReader.
*
* @param xmlReader The XmlReader being read.
* @param rootElementName Optional root element name to override the default defined by the model. Used to support
* cases where the model can deserialize from different root element names.
* @return An instance of BlobHierarchyListSegment if the XmlReader was pointing to an instance of it, or null if it
* was pointing to XML null.
* @throws IllegalStateException If the deserialized XML object was missing any required properties.
* @throws XMLStreamException If an error occurs while reading the BlobHierarchyListSegment.
*/
public static BlobHierarchyListSegment fromXml(XmlReader xmlReader, String rootElementName)
    throws XMLStreamException {
    // Default the root element to "Blobs" unless the caller overrides it.
    String finalRootElementName = CoreUtils.isNullOrEmpty(rootElementName) ? "Blobs" : rootElementName;
    return xmlReader.readObject(finalRootElementName, reader -> {
        BlobHierarchyListSegment deserializedBlobHierarchyListSegment = new BlobHierarchyListSegment();
        // NOTE(review): this variant expects <BlobPrefix>/<Blob> entries wrapped in
        // <BlobPrefixes>/<BlobItems> container elements. Per the accompanying review comment, the
        // service actually emits them unwrapped and intermixed directly under <Blobs> — confirm
        // which wire format this copy must handle.
        while (reader.nextElement() != XmlToken.END_ELEMENT) {
            QName elementName = reader.getElementName();
            if ("BlobPrefixes".equals(elementName.getLocalPart())) {
                // Consume every child of <BlobPrefixes>, skipping unrecognized elements.
                while (reader.nextElement() != XmlToken.END_ELEMENT) {
                    elementName = reader.getElementName();
                    if ("BlobPrefix".equals(elementName.getLocalPart())) {
                        // Lazily allocate the list so absent sections leave the field untouched.
                        if (deserializedBlobHierarchyListSegment.blobPrefixes == null) {
                            deserializedBlobHierarchyListSegment.blobPrefixes = new ArrayList<>();
                        }
                        deserializedBlobHierarchyListSegment.blobPrefixes
                            .add(BlobPrefix.fromXml(reader, "BlobPrefix"));
                    } else {
                        reader.skipElement();
                    }
                }
            } else if ("BlobItems".equals(elementName.getLocalPart())) {
                // Consume every child of <BlobItems>, skipping unrecognized elements.
                while (reader.nextElement() != XmlToken.END_ELEMENT) {
                    elementName = reader.getElementName();
                    if ("Blob".equals(elementName.getLocalPart())) {
                        if (deserializedBlobHierarchyListSegment.blobItems == null) {
                            deserializedBlobHierarchyListSegment.blobItems = new ArrayList<>();
                        }
                        deserializedBlobHierarchyListSegment.blobItems
                            .add(BlobItemInternal.fromXml(reader, "Blob"));
                    } else {
                        reader.skipElement();
                    }
                }
            } else {
                // Unknown element under the root: skip it wholesale.
                reader.skipElement();
            }
        }
        return deserializedBlobHierarchyListSegment;
    });
}
} | class BlobHierarchyListSegment implements XmlSerializable<BlobHierarchyListSegment> {
/*
* The BlobPrefixes property.
*/
private List<BlobPrefix> blobPrefixes = new ArrayList<>();
/*
* The BlobItems property.
*/
private List<BlobItemInternal> blobItems = new ArrayList<>();
/**
* Creates an instance of BlobHierarchyListSegment class.
*/
public BlobHierarchyListSegment() {
}
/**
* Get the blobPrefixes property: The BlobPrefixes property.
*
* @return the blobPrefixes value.
*/
public List<BlobPrefix> getBlobPrefixes() {
return this.blobPrefixes;
}
/**
* Set the blobPrefixes property: The BlobPrefixes property.
*
* @param blobPrefixes the blobPrefixes value to set.
* @return the BlobHierarchyListSegment object itself.
*/
public BlobHierarchyListSegment setBlobPrefixes(List<BlobPrefix> blobPrefixes) {
this.blobPrefixes = blobPrefixes;
return this;
}
/**
* Get the blobItems property: The BlobItems property.
*
* @return the blobItems value.
*/
public List<BlobItemInternal> getBlobItems() {
return this.blobItems;
}
/**
* Set the blobItems property: The BlobItems property.
*
* @param blobItems the blobItems value to set.
* @return the BlobHierarchyListSegment object itself.
*/
public BlobHierarchyListSegment setBlobItems(List<BlobItemInternal> blobItems) {
this.blobItems = blobItems;
return this;
}
@Override
public XmlWriter toXml(XmlWriter xmlWriter) throws XMLStreamException {
return toXml(xmlWriter, null);
}
@Override
/**
* Reads an instance of BlobHierarchyListSegment from the XmlReader.
*
* @param xmlReader The XmlReader being read.
* @return An instance of BlobHierarchyListSegment if the XmlReader was pointing to an instance of it, or null if it
* was pointing to XML null.
* @throws IllegalStateException If the deserialized XML object was missing any required properties.
* @throws XMLStreamException If an error occurs while reading the BlobHierarchyListSegment.
*/
public static BlobHierarchyListSegment fromXml(XmlReader xmlReader) throws XMLStreamException {
return fromXml(xmlReader, null);
}
/**
* Reads an instance of BlobHierarchyListSegment from the XmlReader.
*
* @param xmlReader The XmlReader being read.
* @param rootElementName Optional root element name to override the default defined by the model. Used to support
* cases where the model can deserialize from different root element names.
* @return An instance of BlobHierarchyListSegment if the XmlReader was pointing to an instance of it, or null if it
* was pointing to XML null.
* @throws IllegalStateException If the deserialized XML object was missing any required properties.
* @throws XMLStreamException If an error occurs while reading the BlobHierarchyListSegment.
*/
public static BlobHierarchyListSegment fromXml(XmlReader xmlReader, String rootElementName)
    throws XMLStreamException {
    // Default the root element to "Blobs" unless the caller overrides it.
    String finalRootElementName = CoreUtils.isNullOrEmpty(rootElementName) ? "Blobs" : rootElementName;
    return xmlReader.readObject(finalRootElementName, reader -> {
        BlobHierarchyListSegment deserializedBlobHierarchyListSegment = new BlobHierarchyListSegment();
        // Flat variant: <BlobPrefix> and <Blob> children appear directly (and possibly intermixed)
        // under the root element, matching the wire format described in the review comment earlier
        // in this file.
        while (reader.nextElement() != XmlToken.END_ELEMENT) {
            QName elementName = reader.getElementName();
            if ("BlobPrefix".equals(elementName.getLocalPart())) {
                // Lazily allocate the list so absent sections leave the field untouched.
                if (deserializedBlobHierarchyListSegment.blobPrefixes == null) {
                    deserializedBlobHierarchyListSegment.blobPrefixes = new ArrayList<>();
                }
                deserializedBlobHierarchyListSegment.blobPrefixes.add(BlobPrefix.fromXml(reader, "BlobPrefix"));
            } else if ("Blob".equals(elementName.getLocalPart())) {
                if (deserializedBlobHierarchyListSegment.blobItems == null) {
                    deserializedBlobHierarchyListSegment.blobItems = new ArrayList<>();
                }
                deserializedBlobHierarchyListSegment.blobItems.add(BlobItemInternal.fromXml(reader, "Blob"));
            } else {
                // Unknown element under the root: skip it wholesale.
                reader.skipElement();
            }
        }
        return deserializedBlobHierarchyListSegment;
    });
}
} |
You can follow this as an example: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-file-share/swagger/src/main/java/ShareStorageCustomization.java#L37 | public XmlWriter toXml(XmlWriter xmlWriter, String rootElementName) throws XMLStreamException {
rootElementName = CoreUtils.isNullOrEmpty(rootElementName) ? "Blobs" : rootElementName;
xmlWriter.writeStartElement(rootElementName);
if (this.blobPrefixes != null) {
xmlWriter.writeStartElement("BlobPrefixes");
for (BlobPrefix element : this.blobPrefixes) {
xmlWriter.writeXml(element, "BlobPrefix");
}
xmlWriter.writeEndElement();
}
if (this.blobItems != null) {
xmlWriter.writeStartElement("BlobItems");
for (BlobItemInternal element : this.blobItems) {
xmlWriter.writeXml(element, "Blob");
}
xmlWriter.writeEndElement();
}
return xmlWriter.writeEndElement();
} | } | public XmlWriter toXml(XmlWriter xmlWriter, String rootElementName) throws XMLStreamException {
rootElementName = CoreUtils.isNullOrEmpty(rootElementName) ? "Blobs" : rootElementName;
xmlWriter.writeStartElement(rootElementName);
if (this.blobPrefixes != null) {
for (BlobPrefix element : this.blobPrefixes) {
xmlWriter.writeXml(element, "BlobPrefix");
}
}
if (this.blobItems != null) {
for (BlobItemInternal element : this.blobItems) {
xmlWriter.writeXml(element, "Blob");
}
}
return xmlWriter.writeEndElement();
} | class BlobHierarchyListSegment implements XmlSerializable<BlobHierarchyListSegment> {
/*
* The BlobPrefixes property.
*/
private List<BlobPrefix> blobPrefixes = new ArrayList<>();
/*
* The BlobItems property.
*/
private List<BlobItemInternal> blobItems = new ArrayList<>();
/**
* Creates an instance of BlobHierarchyListSegment class.
*/
public BlobHierarchyListSegment() {
}
/**
* Get the blobPrefixes property: The BlobPrefixes property.
*
* @return the blobPrefixes value.
*/
public List<BlobPrefix> getBlobPrefixes() {
return this.blobPrefixes;
}
/**
* Set the blobPrefixes property: The BlobPrefixes property.
*
* @param blobPrefixes the blobPrefixes value to set.
* @return the BlobHierarchyListSegment object itself.
*/
public BlobHierarchyListSegment setBlobPrefixes(List<BlobPrefix> blobPrefixes) {
this.blobPrefixes = blobPrefixes;
return this;
}
/**
* Get the blobItems property: The BlobItems property.
*
* @return the blobItems value.
*/
public List<BlobItemInternal> getBlobItems() {
return this.blobItems;
}
/**
* Set the blobItems property: The BlobItems property.
*
* @param blobItems the blobItems value to set.
* @return the BlobHierarchyListSegment object itself.
*/
public BlobHierarchyListSegment setBlobItems(List<BlobItemInternal> blobItems) {
this.blobItems = blobItems;
return this;
}
@Override
public XmlWriter toXml(XmlWriter xmlWriter) throws XMLStreamException {
return toXml(xmlWriter, null);
}
@Override
/**
* Reads an instance of BlobHierarchyListSegment from the XmlReader.
*
* @param xmlReader The XmlReader being read.
* @return An instance of BlobHierarchyListSegment if the XmlReader was pointing to an instance of it, or null if it
* was pointing to XML null.
* @throws IllegalStateException If the deserialized XML object was missing any required properties.
* @throws XMLStreamException If an error occurs while reading the BlobHierarchyListSegment.
*/
public static BlobHierarchyListSegment fromXml(XmlReader xmlReader) throws XMLStreamException {
return fromXml(xmlReader, null);
}
/**
* Reads an instance of BlobHierarchyListSegment from the XmlReader.
*
* @param xmlReader The XmlReader being read.
* @param rootElementName Optional root element name to override the default defined by the model. Used to support
* cases where the model can deserialize from different root element names.
* @return An instance of BlobHierarchyListSegment if the XmlReader was pointing to an instance of it, or null if it
* was pointing to XML null.
* @throws IllegalStateException If the deserialized XML object was missing any required properties.
* @throws XMLStreamException If an error occurs while reading the BlobHierarchyListSegment.
*/
public static BlobHierarchyListSegment fromXml(XmlReader xmlReader, String rootElementName)
    throws XMLStreamException {
    // Default the root element to "Blobs" unless the caller overrides it.
    String finalRootElementName = CoreUtils.isNullOrEmpty(rootElementName) ? "Blobs" : rootElementName;
    return xmlReader.readObject(finalRootElementName, reader -> {
        BlobHierarchyListSegment deserializedBlobHierarchyListSegment = new BlobHierarchyListSegment();
        // NOTE(review): this variant expects <BlobPrefix>/<Blob> entries wrapped in
        // <BlobPrefixes>/<BlobItems> container elements. Per the accompanying review comment, the
        // service actually emits them unwrapped and intermixed directly under <Blobs> — confirm
        // which wire format this copy must handle.
        while (reader.nextElement() != XmlToken.END_ELEMENT) {
            QName elementName = reader.getElementName();
            if ("BlobPrefixes".equals(elementName.getLocalPart())) {
                // Consume every child of <BlobPrefixes>, skipping unrecognized elements.
                while (reader.nextElement() != XmlToken.END_ELEMENT) {
                    elementName = reader.getElementName();
                    if ("BlobPrefix".equals(elementName.getLocalPart())) {
                        // Lazily allocate the list so absent sections leave the field untouched.
                        if (deserializedBlobHierarchyListSegment.blobPrefixes == null) {
                            deserializedBlobHierarchyListSegment.blobPrefixes = new ArrayList<>();
                        }
                        deserializedBlobHierarchyListSegment.blobPrefixes
                            .add(BlobPrefix.fromXml(reader, "BlobPrefix"));
                    } else {
                        reader.skipElement();
                    }
                }
            } else if ("BlobItems".equals(elementName.getLocalPart())) {
                // Consume every child of <BlobItems>, skipping unrecognized elements.
                while (reader.nextElement() != XmlToken.END_ELEMENT) {
                    elementName = reader.getElementName();
                    if ("Blob".equals(elementName.getLocalPart())) {
                        if (deserializedBlobHierarchyListSegment.blobItems == null) {
                            deserializedBlobHierarchyListSegment.blobItems = new ArrayList<>();
                        }
                        deserializedBlobHierarchyListSegment.blobItems
                            .add(BlobItemInternal.fromXml(reader, "Blob"));
                    } else {
                        reader.skipElement();
                    }
                }
            } else {
                // Unknown element under the root: skip it wholesale.
                reader.skipElement();
            }
        }
        return deserializedBlobHierarchyListSegment;
    });
}
} | class BlobHierarchyListSegment implements XmlSerializable<BlobHierarchyListSegment> {
/*
* The BlobPrefixes property.
*/
private List<BlobPrefix> blobPrefixes = new ArrayList<>();
/*
* The BlobItems property.
*/
private List<BlobItemInternal> blobItems = new ArrayList<>();
/**
* Creates an instance of BlobHierarchyListSegment class.
*/
public BlobHierarchyListSegment() {
}
/**
* Get the blobPrefixes property: The BlobPrefixes property.
*
* @return the blobPrefixes value.
*/
public List<BlobPrefix> getBlobPrefixes() {
return this.blobPrefixes;
}
/**
* Set the blobPrefixes property: The BlobPrefixes property.
*
* @param blobPrefixes the blobPrefixes value to set.
* @return the BlobHierarchyListSegment object itself.
*/
public BlobHierarchyListSegment setBlobPrefixes(List<BlobPrefix> blobPrefixes) {
this.blobPrefixes = blobPrefixes;
return this;
}
/**
* Get the blobItems property: The BlobItems property.
*
* @return the blobItems value.
*/
public List<BlobItemInternal> getBlobItems() {
return this.blobItems;
}
/**
* Set the blobItems property: The BlobItems property.
*
* @param blobItems the blobItems value to set.
* @return the BlobHierarchyListSegment object itself.
*/
public BlobHierarchyListSegment setBlobItems(List<BlobItemInternal> blobItems) {
this.blobItems = blobItems;
return this;
}
@Override
public XmlWriter toXml(XmlWriter xmlWriter) throws XMLStreamException {
return toXml(xmlWriter, null);
}
@Override
/**
* Reads an instance of BlobHierarchyListSegment from the XmlReader.
*
* @param xmlReader The XmlReader being read.
* @return An instance of BlobHierarchyListSegment if the XmlReader was pointing to an instance of it, or null if it
* was pointing to XML null.
* @throws IllegalStateException If the deserialized XML object was missing any required properties.
* @throws XMLStreamException If an error occurs while reading the BlobHierarchyListSegment.
*/
public static BlobHierarchyListSegment fromXml(XmlReader xmlReader) throws XMLStreamException {
return fromXml(xmlReader, null);
}
/**
* Reads an instance of BlobHierarchyListSegment from the XmlReader.
*
* @param xmlReader The XmlReader being read.
* @param rootElementName Optional root element name to override the default defined by the model. Used to support
* cases where the model can deserialize from different root element names.
* @return An instance of BlobHierarchyListSegment if the XmlReader was pointing to an instance of it, or null if it
* was pointing to XML null.
* @throws IllegalStateException If the deserialized XML object was missing any required properties.
* @throws XMLStreamException If an error occurs while reading the BlobHierarchyListSegment.
*/
public static BlobHierarchyListSegment fromXml(XmlReader xmlReader, String rootElementName)
throws XMLStreamException {
String finalRootElementName = CoreUtils.isNullOrEmpty(rootElementName) ? "Blobs" : rootElementName;
return xmlReader.readObject(finalRootElementName, reader -> {
BlobHierarchyListSegment deserializedBlobHierarchyListSegment = new BlobHierarchyListSegment();
while (reader.nextElement() != XmlToken.END_ELEMENT) {
QName elementName = reader.getElementName();
if ("BlobPrefix".equals(elementName.getLocalPart())) {
if (deserializedBlobHierarchyListSegment.blobPrefixes == null) {
deserializedBlobHierarchyListSegment.blobPrefixes = new ArrayList<>();
}
deserializedBlobHierarchyListSegment.blobPrefixes.add(BlobPrefix.fromXml(reader, "BlobPrefix"));
} else if ("Blob".equals(elementName.getLocalPart())) {
if (deserializedBlobHierarchyListSegment.blobItems == null) {
deserializedBlobHierarchyListSegment.blobItems = new ArrayList<>();
}
deserializedBlobHierarchyListSegment.blobItems.add(BlobItemInternal.fromXml(reader, "Blob"));
} else {
reader.skipElement();
}
}
return deserializedBlobHierarchyListSegment;
});
}
} |
Appears the service returns this as a "String" so we'll need a transform here. I'd say to cover all bases it'd be good to do something more general like ```java JsonToken token = reader.currentToken(); if (token == JsonToken.STRING) { deserializedPath.contentLength = Long.parseLong(reader.getString()); } else if (token == JsonToken.NUMBER) { deserializedPath.contentLength = reader.getLong(); } else if (token == JsonToken.NULL) { deserializedPath.contentLength = null; } else { throw new IllegalStateException("Invalid token, expected one of STRING, NUMBER, or NULL. Was " + token); } | public static Path fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Path deserializedPath = new Path();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("name".equals(fieldName)) {
deserializedPath.name = reader.getString();
} else if ("isDirectory".equals(fieldName)) {
deserializedPath.isDirectory = reader.getNullable(JsonReader::getBoolean);
} else if ("lastModified".equals(fieldName)) {
deserializedPath.lastModified = reader.getString();
} else if ("contentLength".equals(fieldName)) {
deserializedPath.contentLength = reader.getNullable(JsonReader::getLong);
} else if ("owner".equals(fieldName)) {
deserializedPath.owner = reader.getString();
} else if ("group".equals(fieldName)) {
deserializedPath.group = reader.getString();
} else if ("permissions".equals(fieldName)) {
deserializedPath.permissions = reader.getString();
} else if ("EncryptionScope".equals(fieldName)) {
deserializedPath.encryptionScope = reader.getString();
} else if ("creationTime".equals(fieldName)) {
deserializedPath.creationTime = reader.getString();
} else if ("expiryTime".equals(fieldName)) {
deserializedPath.expiryTime = reader.getString();
} else if ("EncryptionContext".equals(fieldName)) {
deserializedPath.encryptionContext = reader.getString();
} else if ("etag".equals(fieldName)) {
deserializedPath.eTag = reader.getString();
} else {
reader.skipChildren();
}
}
return deserializedPath;
});
} | deserializedPath.contentLength = reader.getNullable(JsonReader::getLong); | public static Path fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Path deserializedPath = new Path();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("name".equals(fieldName)) {
deserializedPath.name = reader.getString();
} else if ("isDirectory".equals(fieldName)) {
JsonToken token = reader.currentToken();
if (token == JsonToken.STRING) {
deserializedPath.isDirectory = Boolean.parseBoolean(reader.getString());
} else if (token == JsonToken.BOOLEAN) {
deserializedPath.isDirectory = reader.getBoolean();
} else if (token == JsonToken.NULL) {
deserializedPath.isDirectory = null;
} else {
throw new IllegalStateException(
"Invalid token, expected one of STRING, BOOLEAN, or NULL. Was " + token);
}
} else if ("lastModified".equals(fieldName)) {
deserializedPath.lastModified = reader.getString();
} else if ("contentLength".equals(fieldName)) {
JsonToken token = reader.currentToken();
if (token == JsonToken.STRING) {
deserializedPath.contentLength = Long.parseLong(reader.getString());
} else if (token == JsonToken.NUMBER) {
deserializedPath.contentLength = reader.getLong();
} else if (token == JsonToken.NULL) {
deserializedPath.contentLength = null;
} else {
throw new IllegalStateException(
"Invalid token, expected one of STRING, NUMBER, or NULL. Was " + token);
}
} else if ("owner".equals(fieldName)) {
deserializedPath.owner = reader.getString();
} else if ("group".equals(fieldName)) {
deserializedPath.group = reader.getString();
} else if ("permissions".equals(fieldName)) {
deserializedPath.permissions = reader.getString();
} else if ("EncryptionScope".equals(fieldName)) {
deserializedPath.encryptionScope = reader.getString();
} else if ("creationTime".equals(fieldName)) {
deserializedPath.creationTime = reader.getString();
} else if ("expiryTime".equals(fieldName)) {
deserializedPath.expiryTime = reader.getString();
} else if ("EncryptionContext".equals(fieldName)) {
deserializedPath.encryptionContext = reader.getString();
} else if ("etag".equals(fieldName)) {
deserializedPath.eTag = reader.getString();
} else {
reader.skipChildren();
}
}
return deserializedPath;
});
} | class Path implements JsonSerializable<Path> {
/*
* The name property.
*/
private String name;
/*
* The isDirectory property.
*/
private Boolean isDirectory;
/*
* The lastModified property.
*/
private String lastModified;
/*
* The contentLength property.
*/
private Long contentLength;
/*
* The owner property.
*/
private String owner;
/*
* The group property.
*/
private String group;
/*
* The permissions property.
*/
private String permissions;
/*
* The name of the encryption scope under which the blob is encrypted.
*/
private String encryptionScope;
/*
* The creationTime property.
*/
private String creationTime;
/*
* The expiryTime property.
*/
private String expiryTime;
/*
* The EncryptionContext property.
*/
private String encryptionContext;
/*
* The etag property.
*/
private String eTag;
/**
* Creates an instance of Path class.
*/
public Path() {
}
/**
* Get the name property: The name property.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: The name property.
*
* @param name the name value to set.
* @return the Path object itself.
*/
public Path setName(String name) {
this.name = name;
return this;
}
/**
* Get the isDirectory property: The isDirectory property.
*
* @return the isDirectory value.
*/
public Boolean isDirectory() {
return this.isDirectory;
}
/**
* Set the isDirectory property: The isDirectory property.
*
* @param isDirectory the isDirectory value to set.
* @return the Path object itself.
*/
public Path setIsDirectory(Boolean isDirectory) {
this.isDirectory = isDirectory;
return this;
}
/**
* Get the lastModified property: The lastModified property.
*
* @return the lastModified value.
*/
public String getLastModified() {
return this.lastModified;
}
/**
* Set the lastModified property: The lastModified property.
*
* @param lastModified the lastModified value to set.
* @return the Path object itself.
*/
public Path setLastModified(String lastModified) {
this.lastModified = lastModified;
return this;
}
/**
* Get the contentLength property: The contentLength property.
*
* @return the contentLength value.
*/
public Long getContentLength() {
return this.contentLength;
}
/**
* Set the contentLength property: The contentLength property.
*
* @param contentLength the contentLength value to set.
* @return the Path object itself.
*/
public Path setContentLength(Long contentLength) {
this.contentLength = contentLength;
return this;
}
/**
* Get the owner property: The owner property.
*
* @return the owner value.
*/
public String getOwner() {
return this.owner;
}
/**
* Set the owner property: The owner property.
*
* @param owner the owner value to set.
* @return the Path object itself.
*/
public Path setOwner(String owner) {
this.owner = owner;
return this;
}
/**
* Get the group property: The group property.
*
* @return the group value.
*/
public String getGroup() {
return this.group;
}
/**
* Set the group property: The group property.
*
* @param group the group value to set.
* @return the Path object itself.
*/
public Path setGroup(String group) {
this.group = group;
return this;
}
/**
* Get the permissions property: The permissions property.
*
* @return the permissions value.
*/
public String getPermissions() {
return this.permissions;
}
/**
* Set the permissions property: The permissions property.
*
* @param permissions the permissions value to set.
* @return the Path object itself.
*/
public Path setPermissions(String permissions) {
this.permissions = permissions;
return this;
}
/**
* Get the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @return the encryptionScope value.
*/
public String getEncryptionScope() {
return this.encryptionScope;
}
/**
* Set the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @param encryptionScope the encryptionScope value to set.
* @return the Path object itself.
*/
public Path setEncryptionScope(String encryptionScope) {
this.encryptionScope = encryptionScope;
return this;
}
/**
* Get the creationTime property: The creationTime property.
*
* @return the creationTime value.
*/
public String getCreationTime() {
return this.creationTime;
}
/**
* Set the creationTime property: The creationTime property.
*
* @param creationTime the creationTime value to set.
* @return the Path object itself.
*/
public Path setCreationTime(String creationTime) {
this.creationTime = creationTime;
return this;
}
/**
* Get the expiryTime property: The expiryTime property.
*
* @return the expiryTime value.
*/
public String getExpiryTime() {
return this.expiryTime;
}
/**
* Set the expiryTime property: The expiryTime property.
*
* @param expiryTime the expiryTime value to set.
* @return the Path object itself.
*/
public Path setExpiryTime(String expiryTime) {
this.expiryTime = expiryTime;
return this;
}
/**
* Get the encryptionContext property: The EncryptionContext property.
*
* @return the encryptionContext value.
*/
public String getEncryptionContext() {
return this.encryptionContext;
}
/**
* Set the encryptionContext property: The EncryptionContext property.
*
* @param encryptionContext the encryptionContext value to set.
* @return the Path object itself.
*/
public Path setEncryptionContext(String encryptionContext) {
this.encryptionContext = encryptionContext;
return this;
}
/**
* Get the eTag property: The etag property.
*
* @return the eTag value.
*/
public String getETag() {
return this.eTag;
}
/**
* Set the eTag property: The etag property.
*
* @param eTag the eTag value to set.
* @return the Path object itself.
*/
public Path setETag(String eTag) {
this.eTag = eTag;
return this;
}
/**
* {@inheritDoc}
*/
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("name", this.name);
jsonWriter.writeBooleanField("isDirectory", this.isDirectory);
jsonWriter.writeStringField("lastModified", this.lastModified);
jsonWriter.writeNumberField("contentLength", this.contentLength);
jsonWriter.writeStringField("owner", this.owner);
jsonWriter.writeStringField("group", this.group);
jsonWriter.writeStringField("permissions", this.permissions);
jsonWriter.writeStringField("EncryptionScope", this.encryptionScope);
jsonWriter.writeStringField("creationTime", this.creationTime);
jsonWriter.writeStringField("expiryTime", this.expiryTime);
jsonWriter.writeStringField("EncryptionContext", this.encryptionContext);
jsonWriter.writeStringField("etag", this.eTag);
return jsonWriter.writeEndObject();
}
/**
* Reads an instance of Path from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of Path if the JsonReader was pointing to an instance of it, or null if it was pointing to
* JSON null.
* @throws IOException If an error occurs while reading the Path.
*/
} | class Path implements JsonSerializable<Path> {
/*
* The name property.
*/
private String name;
/*
* The isDirectory property.
*/
private Boolean isDirectory;
/*
* The lastModified property.
*/
private String lastModified;
/*
* The contentLength property.
*/
private Long contentLength;
/*
* The owner property.
*/
private String owner;
/*
* The group property.
*/
private String group;
/*
* The permissions property.
*/
private String permissions;
/*
* The name of the encryption scope under which the blob is encrypted.
*/
private String encryptionScope;
/*
* The creationTime property.
*/
private String creationTime;
/*
* The expiryTime property.
*/
private String expiryTime;
/*
* The EncryptionContext property.
*/
private String encryptionContext;
/*
* The etag property.
*/
private String eTag;
/**
* Creates an instance of Path class.
*/
public Path() {
}
/**
* Get the name property: The name property.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: The name property.
*
* @param name the name value to set.
* @return the Path object itself.
*/
public Path setName(String name) {
this.name = name;
return this;
}
/**
* Get the isDirectory property: The isDirectory property.
*
* @return the isDirectory value.
*/
public Boolean isDirectory() {
return this.isDirectory;
}
/**
* Set the isDirectory property: The isDirectory property.
*
* @param isDirectory the isDirectory value to set.
* @return the Path object itself.
*/
public Path setIsDirectory(Boolean isDirectory) {
this.isDirectory = isDirectory;
return this;
}
/**
* Get the lastModified property: The lastModified property.
*
* @return the lastModified value.
*/
public String getLastModified() {
return this.lastModified;
}
/**
* Set the lastModified property: The lastModified property.
*
* @param lastModified the lastModified value to set.
* @return the Path object itself.
*/
public Path setLastModified(String lastModified) {
this.lastModified = lastModified;
return this;
}
/**
* Get the contentLength property: The contentLength property.
*
* @return the contentLength value.
*/
public Long getContentLength() {
return this.contentLength;
}
/**
* Set the contentLength property: The contentLength property.
*
* @param contentLength the contentLength value to set.
* @return the Path object itself.
*/
public Path setContentLength(Long contentLength) {
this.contentLength = contentLength;
return this;
}
/**
* Get the owner property: The owner property.
*
* @return the owner value.
*/
public String getOwner() {
return this.owner;
}
/**
* Set the owner property: The owner property.
*
* @param owner the owner value to set.
* @return the Path object itself.
*/
public Path setOwner(String owner) {
this.owner = owner;
return this;
}
/**
* Get the group property: The group property.
*
* @return the group value.
*/
public String getGroup() {
return this.group;
}
/**
* Set the group property: The group property.
*
* @param group the group value to set.
* @return the Path object itself.
*/
public Path setGroup(String group) {
this.group = group;
return this;
}
/**
* Get the permissions property: The permissions property.
*
* @return the permissions value.
*/
public String getPermissions() {
return this.permissions;
}
/**
* Set the permissions property: The permissions property.
*
* @param permissions the permissions value to set.
* @return the Path object itself.
*/
public Path setPermissions(String permissions) {
this.permissions = permissions;
return this;
}
/**
* Get the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @return the encryptionScope value.
*/
public String getEncryptionScope() {
return this.encryptionScope;
}
/**
* Set the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @param encryptionScope the encryptionScope value to set.
* @return the Path object itself.
*/
public Path setEncryptionScope(String encryptionScope) {
this.encryptionScope = encryptionScope;
return this;
}
/**
* Get the creationTime property: The creationTime property.
*
* @return the creationTime value.
*/
public String getCreationTime() {
return this.creationTime;
}
/**
* Set the creationTime property: The creationTime property.
*
* @param creationTime the creationTime value to set.
* @return the Path object itself.
*/
public Path setCreationTime(String creationTime) {
this.creationTime = creationTime;
return this;
}
/**
* Get the expiryTime property: The expiryTime property.
*
* @return the expiryTime value.
*/
public String getExpiryTime() {
return this.expiryTime;
}
/**
* Set the expiryTime property: The expiryTime property.
*
* @param expiryTime the expiryTime value to set.
* @return the Path object itself.
*/
public Path setExpiryTime(String expiryTime) {
this.expiryTime = expiryTime;
return this;
}
/**
* Get the encryptionContext property: The EncryptionContext property.
*
* @return the encryptionContext value.
*/
public String getEncryptionContext() {
return this.encryptionContext;
}
/**
* Set the encryptionContext property: The EncryptionContext property.
*
* @param encryptionContext the encryptionContext value to set.
* @return the Path object itself.
*/
public Path setEncryptionContext(String encryptionContext) {
this.encryptionContext = encryptionContext;
return this;
}
/**
* Get the eTag property: The etag property.
*
* @return the eTag value.
*/
public String getETag() {
return this.eTag;
}
/**
* Set the eTag property: The etag property.
*
* @param eTag the eTag value to set.
* @return the Path object itself.
*/
public Path setETag(String eTag) {
this.eTag = eTag;
return this;
}
/**
* {@inheritDoc}
*/
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("name", this.name);
if (isDirectory != null) {
jsonWriter.writeStringField("isDirectory", String.valueOf(this.isDirectory));
}
jsonWriter.writeStringField("lastModified", this.lastModified);
if (contentLength != null) {
jsonWriter.writeStringField("contentLength", String.valueOf(this.contentLength));
}
jsonWriter.writeStringField("owner", this.owner);
jsonWriter.writeStringField("group", this.group);
jsonWriter.writeStringField("permissions", this.permissions);
jsonWriter.writeStringField("EncryptionScope", this.encryptionScope);
jsonWriter.writeStringField("creationTime", this.creationTime);
jsonWriter.writeStringField("expiryTime", this.expiryTime);
jsonWriter.writeStringField("EncryptionContext", this.encryptionContext);
jsonWriter.writeStringField("etag", this.eTag);
return jsonWriter.writeEndObject();
}
/**
* Reads an instance of Path from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of Path if the JsonReader was pointing to an instance of it, or null if it was pointing to
* JSON null.
* @throws IOException IOException If an error occurs while reading the Path.
* @throws IllegalStateException If a token is not an allowed type.
*/
} |
From the other comment about the type being wrong: ```java if (contentLength != null) { jsonWriter.writeStringField("contentLength", String.valueOf(contentLength)); } ``` | public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("name", this.name);
jsonWriter.writeBooleanField("isDirectory", this.isDirectory);
jsonWriter.writeStringField("lastModified", this.lastModified);
jsonWriter.writeNumberField("contentLength", this.contentLength);
jsonWriter.writeStringField("owner", this.owner);
jsonWriter.writeStringField("group", this.group);
jsonWriter.writeStringField("permissions", this.permissions);
jsonWriter.writeStringField("EncryptionScope", this.encryptionScope);
jsonWriter.writeStringField("creationTime", this.creationTime);
jsonWriter.writeStringField("expiryTime", this.expiryTime);
jsonWriter.writeStringField("EncryptionContext", this.encryptionContext);
jsonWriter.writeStringField("etag", this.eTag);
return jsonWriter.writeEndObject();
} | jsonWriter.writeNumberField("contentLength", this.contentLength); | public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("name", this.name);
if (isDirectory != null) {
jsonWriter.writeStringField("isDirectory", String.valueOf(this.isDirectory));
}
jsonWriter.writeStringField("lastModified", this.lastModified);
if (contentLength != null) {
jsonWriter.writeStringField("contentLength", String.valueOf(this.contentLength));
}
jsonWriter.writeStringField("owner", this.owner);
jsonWriter.writeStringField("group", this.group);
jsonWriter.writeStringField("permissions", this.permissions);
jsonWriter.writeStringField("EncryptionScope", this.encryptionScope);
jsonWriter.writeStringField("creationTime", this.creationTime);
jsonWriter.writeStringField("expiryTime", this.expiryTime);
jsonWriter.writeStringField("EncryptionContext", this.encryptionContext);
jsonWriter.writeStringField("etag", this.eTag);
return jsonWriter.writeEndObject();
} | class Path implements JsonSerializable<Path> {
/*
* The name property.
*/
private String name;
/*
* The isDirectory property.
*/
private Boolean isDirectory;
/*
* The lastModified property.
*/
private String lastModified;
/*
* The contentLength property.
*/
private Long contentLength;
/*
* The owner property.
*/
private String owner;
/*
* The group property.
*/
private String group;
/*
* The permissions property.
*/
private String permissions;
/*
* The name of the encryption scope under which the blob is encrypted.
*/
private String encryptionScope;
/*
* The creationTime property.
*/
private String creationTime;
/*
* The expiryTime property.
*/
private String expiryTime;
/*
* The EncryptionContext property.
*/
private String encryptionContext;
/*
* The etag property.
*/
private String eTag;
/**
* Creates an instance of Path class.
*/
public Path() {
}
/**
* Get the name property: The name property.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: The name property.
*
* @param name the name value to set.
* @return the Path object itself.
*/
public Path setName(String name) {
this.name = name;
return this;
}
/**
* Get the isDirectory property: The isDirectory property.
*
* @return the isDirectory value.
*/
public Boolean isDirectory() {
return this.isDirectory;
}
/**
* Set the isDirectory property: The isDirectory property.
*
* @param isDirectory the isDirectory value to set.
* @return the Path object itself.
*/
public Path setIsDirectory(Boolean isDirectory) {
this.isDirectory = isDirectory;
return this;
}
/**
* Get the lastModified property: The lastModified property.
*
* @return the lastModified value.
*/
public String getLastModified() {
return this.lastModified;
}
/**
* Set the lastModified property: The lastModified property.
*
* @param lastModified the lastModified value to set.
* @return the Path object itself.
*/
public Path setLastModified(String lastModified) {
this.lastModified = lastModified;
return this;
}
/**
* Get the contentLength property: The contentLength property.
*
* @return the contentLength value.
*/
public Long getContentLength() {
return this.contentLength;
}
/**
* Set the contentLength property: The contentLength property.
*
* @param contentLength the contentLength value to set.
* @return the Path object itself.
*/
public Path setContentLength(Long contentLength) {
this.contentLength = contentLength;
return this;
}
/**
* Get the owner property: The owner property.
*
* @return the owner value.
*/
public String getOwner() {
return this.owner;
}
/**
* Set the owner property: The owner property.
*
* @param owner the owner value to set.
* @return the Path object itself.
*/
public Path setOwner(String owner) {
this.owner = owner;
return this;
}
/**
* Get the group property: The group property.
*
* @return the group value.
*/
public String getGroup() {
return this.group;
}
/**
* Set the group property: The group property.
*
* @param group the group value to set.
* @return the Path object itself.
*/
public Path setGroup(String group) {
this.group = group;
return this;
}
/**
* Get the permissions property: The permissions property.
*
* @return the permissions value.
*/
public String getPermissions() {
return this.permissions;
}
/**
* Set the permissions property: The permissions property.
*
* @param permissions the permissions value to set.
* @return the Path object itself.
*/
public Path setPermissions(String permissions) {
this.permissions = permissions;
return this;
}
/**
* Get the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @return the encryptionScope value.
*/
public String getEncryptionScope() {
return this.encryptionScope;
}
/**
* Set the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @param encryptionScope the encryptionScope value to set.
* @return the Path object itself.
*/
public Path setEncryptionScope(String encryptionScope) {
this.encryptionScope = encryptionScope;
return this;
}
/**
* Get the creationTime property: The creationTime property.
*
* @return the creationTime value.
*/
public String getCreationTime() {
return this.creationTime;
}
/**
* Set the creationTime property: The creationTime property.
*
* @param creationTime the creationTime value to set.
* @return the Path object itself.
*/
public Path setCreationTime(String creationTime) {
this.creationTime = creationTime;
return this;
}
/**
* Get the expiryTime property: The expiryTime property.
*
* @return the expiryTime value.
*/
public String getExpiryTime() {
return this.expiryTime;
}
/**
* Set the expiryTime property: The expiryTime property.
*
* @param expiryTime the expiryTime value to set.
* @return the Path object itself.
*/
public Path setExpiryTime(String expiryTime) {
this.expiryTime = expiryTime;
return this;
}
/**
* Get the encryptionContext property: The EncryptionContext property.
*
* @return the encryptionContext value.
*/
public String getEncryptionContext() {
return this.encryptionContext;
}
/**
* Set the encryptionContext property: The EncryptionContext property.
*
* @param encryptionContext the encryptionContext value to set.
* @return the Path object itself.
*/
public Path setEncryptionContext(String encryptionContext) {
this.encryptionContext = encryptionContext;
return this;
}
/**
* Get the eTag property: The etag property.
*
* @return the eTag value.
*/
public String getETag() {
return this.eTag;
}
/**
* Set the eTag property: The etag property.
*
* @param eTag the eTag value to set.
* @return the Path object itself.
*/
public Path setETag(String eTag) {
this.eTag = eTag;
return this;
}
/**
* {@inheritDoc}
*/
@Override
/**
* Reads an instance of Path from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of Path if the JsonReader was pointing to an instance of it, or null if it was pointing to
* JSON null.
* @throws IOException If an error occurs while reading the Path.
*/
public static Path fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Path deserializedPath = new Path();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("name".equals(fieldName)) {
deserializedPath.name = reader.getString();
} else if ("isDirectory".equals(fieldName)) {
deserializedPath.isDirectory = reader.getNullable(JsonReader::getBoolean);
} else if ("lastModified".equals(fieldName)) {
deserializedPath.lastModified = reader.getString();
} else if ("contentLength".equals(fieldName)) {
deserializedPath.contentLength = reader.getNullable(JsonReader::getLong);
} else if ("owner".equals(fieldName)) {
deserializedPath.owner = reader.getString();
} else if ("group".equals(fieldName)) {
deserializedPath.group = reader.getString();
} else if ("permissions".equals(fieldName)) {
deserializedPath.permissions = reader.getString();
} else if ("EncryptionScope".equals(fieldName)) {
deserializedPath.encryptionScope = reader.getString();
} else if ("creationTime".equals(fieldName)) {
deserializedPath.creationTime = reader.getString();
} else if ("expiryTime".equals(fieldName)) {
deserializedPath.expiryTime = reader.getString();
} else if ("EncryptionContext".equals(fieldName)) {
deserializedPath.encryptionContext = reader.getString();
} else if ("etag".equals(fieldName)) {
deserializedPath.eTag = reader.getString();
} else {
reader.skipChildren();
}
}
return deserializedPath;
});
}
} | class Path implements JsonSerializable<Path> {
/*
* The name property.
*/
private String name;
/*
* The isDirectory property.
*/
private Boolean isDirectory;
/*
* The lastModified property.
*/
private String lastModified;
/*
* The contentLength property.
*/
private Long contentLength;
/*
* The owner property.
*/
private String owner;
/*
* The group property.
*/
private String group;
/*
* The permissions property.
*/
private String permissions;
/*
* The name of the encryption scope under which the blob is encrypted.
*/
private String encryptionScope;
/*
* The creationTime property.
*/
private String creationTime;
/*
* The expiryTime property.
*/
private String expiryTime;
/*
* The EncryptionContext property.
*/
private String encryptionContext;
/*
* The etag property.
*/
private String eTag;
/**
* Creates an instance of Path class.
*/
public Path() {
}
/**
* Get the name property: The name property.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: The name property.
*
* @param name the name value to set.
* @return the Path object itself.
*/
public Path setName(String name) {
this.name = name;
return this;
}
/**
* Get the isDirectory property: The isDirectory property.
*
* @return the isDirectory value.
*/
public Boolean isDirectory() {
return this.isDirectory;
}
/**
* Set the isDirectory property: The isDirectory property.
*
* @param isDirectory the isDirectory value to set.
* @return the Path object itself.
*/
public Path setIsDirectory(Boolean isDirectory) {
this.isDirectory = isDirectory;
return this;
}
/**
* Get the lastModified property: The lastModified property.
*
* @return the lastModified value.
*/
public String getLastModified() {
return this.lastModified;
}
/**
* Set the lastModified property: The lastModified property.
*
* @param lastModified the lastModified value to set.
* @return the Path object itself.
*/
public Path setLastModified(String lastModified) {
this.lastModified = lastModified;
return this;
}
/**
* Get the contentLength property: The contentLength property.
*
* @return the contentLength value.
*/
public Long getContentLength() {
return this.contentLength;
}
/**
* Set the contentLength property: The contentLength property.
*
* @param contentLength the contentLength value to set.
* @return the Path object itself.
*/
public Path setContentLength(Long contentLength) {
this.contentLength = contentLength;
return this;
}
/**
* Get the owner property: The owner property.
*
* @return the owner value.
*/
public String getOwner() {
return this.owner;
}
/**
* Set the owner property: The owner property.
*
* @param owner the owner value to set.
* @return the Path object itself.
*/
public Path setOwner(String owner) {
this.owner = owner;
return this;
}
/**
* Get the group property: The group property.
*
* @return the group value.
*/
public String getGroup() {
return this.group;
}
/**
* Set the group property: The group property.
*
* @param group the group value to set.
* @return the Path object itself.
*/
public Path setGroup(String group) {
this.group = group;
return this;
}
/**
* Get the permissions property: The permissions property.
*
* @return the permissions value.
*/
public String getPermissions() {
return this.permissions;
}
/**
* Set the permissions property: The permissions property.
*
* @param permissions the permissions value to set.
* @return the Path object itself.
*/
public Path setPermissions(String permissions) {
this.permissions = permissions;
return this;
}
/**
* Get the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @return the encryptionScope value.
*/
public String getEncryptionScope() {
return this.encryptionScope;
}
/**
* Set the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @param encryptionScope the encryptionScope value to set.
* @return the Path object itself.
*/
public Path setEncryptionScope(String encryptionScope) {
this.encryptionScope = encryptionScope;
return this;
}
/**
* Get the creationTime property: The creationTime property.
*
* @return the creationTime value.
*/
public String getCreationTime() {
return this.creationTime;
}
/**
* Set the creationTime property: The creationTime property.
*
* @param creationTime the creationTime value to set.
* @return the Path object itself.
*/
public Path setCreationTime(String creationTime) {
this.creationTime = creationTime;
return this;
}
/**
* Get the expiryTime property: The expiryTime property.
*
* @return the expiryTime value.
*/
public String getExpiryTime() {
return this.expiryTime;
}
/**
* Set the expiryTime property: The expiryTime property.
*
* @param expiryTime the expiryTime value to set.
* @return the Path object itself.
*/
public Path setExpiryTime(String expiryTime) {
this.expiryTime = expiryTime;
return this;
}
/**
* Get the encryptionContext property: The EncryptionContext property.
*
* @return the encryptionContext value.
*/
public String getEncryptionContext() {
return this.encryptionContext;
}
/**
* Set the encryptionContext property: The EncryptionContext property.
*
* @param encryptionContext the encryptionContext value to set.
* @return the Path object itself.
*/
public Path setEncryptionContext(String encryptionContext) {
this.encryptionContext = encryptionContext;
return this;
}
/**
* Get the eTag property: The etag property.
*
* @return the eTag value.
*/
public String getETag() {
return this.eTag;
}
/**
* Set the eTag property: The etag property.
*
* @param eTag the eTag value to set.
* @return the Path object itself.
*/
public Path setETag(String eTag) {
this.eTag = eTag;
return this;
}
/**
* {@inheritDoc}
*/
@Override
/**
* Reads an instance of Path from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of Path if the JsonReader was pointing to an instance of it, or null if it was pointing to
* JSON null.
* @throws IOException IOException If an error occurs while reading the Path.
* @throws IllegalStateException If a token is not an allowed type.
*/
public static Path fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Path deserializedPath = new Path();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("name".equals(fieldName)) {
deserializedPath.name = reader.getString();
} else if ("isDirectory".equals(fieldName)) {
JsonToken token = reader.currentToken();
if (token == JsonToken.STRING) {
deserializedPath.isDirectory = Boolean.parseBoolean(reader.getString());
} else if (token == JsonToken.BOOLEAN) {
deserializedPath.isDirectory = reader.getBoolean();
} else if (token == JsonToken.NULL) {
deserializedPath.isDirectory = null;
} else {
throw new IllegalStateException(
"Invalid token, expected one of STRING, BOOLEAN, or NULL. Was " + token);
}
} else if ("lastModified".equals(fieldName)) {
deserializedPath.lastModified = reader.getString();
} else if ("contentLength".equals(fieldName)) {
JsonToken token = reader.currentToken();
if (token == JsonToken.STRING) {
deserializedPath.contentLength = Long.parseLong(reader.getString());
} else if (token == JsonToken.NUMBER) {
deserializedPath.contentLength = reader.getLong();
} else if (token == JsonToken.NULL) {
deserializedPath.contentLength = null;
} else {
throw new IllegalStateException(
"Invalid token, expected one of STRING, NUMBER, or NULL. Was " + token);
}
} else if ("owner".equals(fieldName)) {
deserializedPath.owner = reader.getString();
} else if ("group".equals(fieldName)) {
deserializedPath.group = reader.getString();
} else if ("permissions".equals(fieldName)) {
deserializedPath.permissions = reader.getString();
} else if ("EncryptionScope".equals(fieldName)) {
deserializedPath.encryptionScope = reader.getString();
} else if ("creationTime".equals(fieldName)) {
deserializedPath.creationTime = reader.getString();
} else if ("expiryTime".equals(fieldName)) {
deserializedPath.expiryTime = reader.getString();
} else if ("EncryptionContext".equals(fieldName)) {
deserializedPath.encryptionContext = reader.getString();
} else if ("etag".equals(fieldName)) {
deserializedPath.eTag = reader.getString();
} else {
reader.skipChildren();
}
}
return deserializedPath;
});
}
} |
```suggestion if (isDirectory != null) { jsonWriter.writeStringField("isDirectory", String.valueOf(isDirectory)); } ``` Need to serialize this as the JSON string it is. | public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("name", this.name);
jsonWriter.writeBooleanField("isDirectory", this.isDirectory);
jsonWriter.writeStringField("lastModified", this.lastModified);
if (contentLength != null) {
jsonWriter.writeStringField("contentLength", String.valueOf(this.contentLength));
}
jsonWriter.writeStringField("owner", this.owner);
jsonWriter.writeStringField("group", this.group);
jsonWriter.writeStringField("permissions", this.permissions);
jsonWriter.writeStringField("EncryptionScope", this.encryptionScope);
jsonWriter.writeStringField("creationTime", this.creationTime);
jsonWriter.writeStringField("expiryTime", this.expiryTime);
jsonWriter.writeStringField("EncryptionContext", this.encryptionContext);
jsonWriter.writeStringField("etag", this.eTag);
return jsonWriter.writeEndObject();
} | jsonWriter.writeBooleanField("isDirectory", this.isDirectory); | public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("name", this.name);
if (isDirectory != null) {
jsonWriter.writeStringField("isDirectory", String.valueOf(this.isDirectory));
}
jsonWriter.writeStringField("lastModified", this.lastModified);
if (contentLength != null) {
jsonWriter.writeStringField("contentLength", String.valueOf(this.contentLength));
}
jsonWriter.writeStringField("owner", this.owner);
jsonWriter.writeStringField("group", this.group);
jsonWriter.writeStringField("permissions", this.permissions);
jsonWriter.writeStringField("EncryptionScope", this.encryptionScope);
jsonWriter.writeStringField("creationTime", this.creationTime);
jsonWriter.writeStringField("expiryTime", this.expiryTime);
jsonWriter.writeStringField("EncryptionContext", this.encryptionContext);
jsonWriter.writeStringField("etag", this.eTag);
return jsonWriter.writeEndObject();
} | class Path implements JsonSerializable<Path> {
/*
* The name property.
*/
private String name;
/*
* The isDirectory property.
*/
private Boolean isDirectory;
/*
* The lastModified property.
*/
private String lastModified;
/*
* The contentLength property.
*/
private Long contentLength;
/*
* The owner property.
*/
private String owner;
/*
* The group property.
*/
private String group;
/*
* The permissions property.
*/
private String permissions;
/*
* The name of the encryption scope under which the blob is encrypted.
*/
private String encryptionScope;
/*
* The creationTime property.
*/
private String creationTime;
/*
* The expiryTime property.
*/
private String expiryTime;
/*
* The EncryptionContext property.
*/
private String encryptionContext;
/*
* The etag property.
*/
private String eTag;
/**
* Creates an instance of Path class.
*/
public Path() {
}
/**
* Get the name property: The name property.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: The name property.
*
* @param name the name value to set.
* @return the Path object itself.
*/
public Path setName(String name) {
this.name = name;
return this;
}
/**
* Get the isDirectory property: The isDirectory property.
*
* @return the isDirectory value.
*/
public Boolean isDirectory() {
return this.isDirectory;
}
/**
* Set the isDirectory property: The isDirectory property.
*
* @param isDirectory the isDirectory value to set.
* @return the Path object itself.
*/
public Path setIsDirectory(Boolean isDirectory) {
this.isDirectory = isDirectory;
return this;
}
/**
* Get the lastModified property: The lastModified property.
*
* @return the lastModified value.
*/
public String getLastModified() {
return this.lastModified;
}
/**
* Set the lastModified property: The lastModified property.
*
* @param lastModified the lastModified value to set.
* @return the Path object itself.
*/
public Path setLastModified(String lastModified) {
this.lastModified = lastModified;
return this;
}
/**
* Get the contentLength property: The contentLength property.
*
* @return the contentLength value.
*/
public Long getContentLength() {
return this.contentLength;
}
/**
* Set the contentLength property: The contentLength property.
*
* @param contentLength the contentLength value to set.
* @return the Path object itself.
*/
public Path setContentLength(Long contentLength) {
this.contentLength = contentLength;
return this;
}
/**
* Get the owner property: The owner property.
*
* @return the owner value.
*/
public String getOwner() {
return this.owner;
}
/**
* Set the owner property: The owner property.
*
* @param owner the owner value to set.
* @return the Path object itself.
*/
public Path setOwner(String owner) {
this.owner = owner;
return this;
}
/**
* Get the group property: The group property.
*
* @return the group value.
*/
public String getGroup() {
return this.group;
}
/**
* Set the group property: The group property.
*
* @param group the group value to set.
* @return the Path object itself.
*/
public Path setGroup(String group) {
this.group = group;
return this;
}
/**
* Get the permissions property: The permissions property.
*
* @return the permissions value.
*/
public String getPermissions() {
return this.permissions;
}
/**
* Set the permissions property: The permissions property.
*
* @param permissions the permissions value to set.
* @return the Path object itself.
*/
public Path setPermissions(String permissions) {
this.permissions = permissions;
return this;
}
/**
* Get the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @return the encryptionScope value.
*/
public String getEncryptionScope() {
return this.encryptionScope;
}
/**
* Set the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @param encryptionScope the encryptionScope value to set.
* @return the Path object itself.
*/
public Path setEncryptionScope(String encryptionScope) {
this.encryptionScope = encryptionScope;
return this;
}
/**
* Get the creationTime property: The creationTime property.
*
* @return the creationTime value.
*/
public String getCreationTime() {
return this.creationTime;
}
/**
* Set the creationTime property: The creationTime property.
*
* @param creationTime the creationTime value to set.
* @return the Path object itself.
*/
public Path setCreationTime(String creationTime) {
this.creationTime = creationTime;
return this;
}
/**
* Get the expiryTime property: The expiryTime property.
*
* @return the expiryTime value.
*/
public String getExpiryTime() {
return this.expiryTime;
}
/**
* Set the expiryTime property: The expiryTime property.
*
* @param expiryTime the expiryTime value to set.
* @return the Path object itself.
*/
public Path setExpiryTime(String expiryTime) {
this.expiryTime = expiryTime;
return this;
}
/**
* Get the encryptionContext property: The EncryptionContext property.
*
* @return the encryptionContext value.
*/
public String getEncryptionContext() {
return this.encryptionContext;
}
/**
* Set the encryptionContext property: The EncryptionContext property.
*
* @param encryptionContext the encryptionContext value to set.
* @return the Path object itself.
*/
public Path setEncryptionContext(String encryptionContext) {
this.encryptionContext = encryptionContext;
return this;
}
/**
* Get the eTag property: The etag property.
*
* @return the eTag value.
*/
public String getETag() {
return this.eTag;
}
/**
* Set the eTag property: The etag property.
*
* @param eTag the eTag value to set.
* @return the Path object itself.
*/
public Path setETag(String eTag) {
this.eTag = eTag;
return this;
}
/**
* {@inheritDoc}
*/
@Override
/**
* Reads an instance of Path from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of Path if the JsonReader was pointing to an instance of it, or null if it was pointing to
* JSON null.
* @throws IOException IOException If an error occurs while reading the Path.
* @throws IllegalStateException If a token is not an allowed type.
*/
public static Path fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Path deserializedPath = new Path();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("name".equals(fieldName)) {
deserializedPath.name = reader.getString();
} else if ("isDirectory".equals(fieldName)) {
JsonToken token = reader.currentToken();
if (token == JsonToken.STRING) {
deserializedPath.isDirectory = Boolean.parseBoolean(reader.getString());
} else if (token == JsonToken.BOOLEAN) {
deserializedPath.isDirectory = reader.getBoolean();
} else if (token == JsonToken.NULL) {
deserializedPath.isDirectory = null;
} else {
throw new IllegalStateException(
"Invalid token, expected one of STRING, BOOLEAN, or NULL. Was " + token);
}
} else if ("lastModified".equals(fieldName)) {
deserializedPath.lastModified = reader.getString();
} else if ("contentLength".equals(fieldName)) {
JsonToken token = reader.currentToken();
if (token == JsonToken.STRING) {
deserializedPath.contentLength = Long.parseLong(reader.getString());
} else if (token == JsonToken.NUMBER) {
deserializedPath.contentLength = reader.getLong();
} else if (token == JsonToken.NULL) {
deserializedPath.contentLength = null;
} else {
throw new IllegalStateException(
"Invalid token, expected one of STRING, NUMBER, or NULL. Was " + token);
}
} else if ("owner".equals(fieldName)) {
deserializedPath.owner = reader.getString();
} else if ("group".equals(fieldName)) {
deserializedPath.group = reader.getString();
} else if ("permissions".equals(fieldName)) {
deserializedPath.permissions = reader.getString();
} else if ("EncryptionScope".equals(fieldName)) {
deserializedPath.encryptionScope = reader.getString();
} else if ("creationTime".equals(fieldName)) {
deserializedPath.creationTime = reader.getString();
} else if ("expiryTime".equals(fieldName)) {
deserializedPath.expiryTime = reader.getString();
} else if ("EncryptionContext".equals(fieldName)) {
deserializedPath.encryptionContext = reader.getString();
} else if ("etag".equals(fieldName)) {
deserializedPath.eTag = reader.getString();
} else {
reader.skipChildren();
}
}
return deserializedPath;
});
}
} | class Path implements JsonSerializable<Path> {
/*
* The name property.
*/
private String name;
/*
* The isDirectory property.
*/
private Boolean isDirectory;
/*
* The lastModified property.
*/
private String lastModified;
/*
* The contentLength property.
*/
private Long contentLength;
/*
* The owner property.
*/
private String owner;
/*
* The group property.
*/
private String group;
/*
* The permissions property.
*/
private String permissions;
/*
* The name of the encryption scope under which the blob is encrypted.
*/
private String encryptionScope;
/*
* The creationTime property.
*/
private String creationTime;
/*
* The expiryTime property.
*/
private String expiryTime;
/*
* The EncryptionContext property.
*/
private String encryptionContext;
/*
* The etag property.
*/
private String eTag;
/**
* Creates an instance of Path class.
*/
public Path() {
}
/**
* Get the name property: The name property.
*
* @return the name value.
*/
public String getName() {
return this.name;
}
/**
* Set the name property: The name property.
*
* @param name the name value to set.
* @return the Path object itself.
*/
public Path setName(String name) {
this.name = name;
return this;
}
/**
* Get the isDirectory property: The isDirectory property.
*
* @return the isDirectory value.
*/
public Boolean isDirectory() {
return this.isDirectory;
}
/**
* Set the isDirectory property: The isDirectory property.
*
* @param isDirectory the isDirectory value to set.
* @return the Path object itself.
*/
public Path setIsDirectory(Boolean isDirectory) {
this.isDirectory = isDirectory;
return this;
}
/**
* Get the lastModified property: The lastModified property.
*
* @return the lastModified value.
*/
public String getLastModified() {
return this.lastModified;
}
/**
* Set the lastModified property: The lastModified property.
*
* @param lastModified the lastModified value to set.
* @return the Path object itself.
*/
public Path setLastModified(String lastModified) {
this.lastModified = lastModified;
return this;
}
/**
* Get the contentLength property: The contentLength property.
*
* @return the contentLength value.
*/
public Long getContentLength() {
return this.contentLength;
}
/**
* Set the contentLength property: The contentLength property.
*
* @param contentLength the contentLength value to set.
* @return the Path object itself.
*/
public Path setContentLength(Long contentLength) {
this.contentLength = contentLength;
return this;
}
/**
* Get the owner property: The owner property.
*
* @return the owner value.
*/
public String getOwner() {
return this.owner;
}
/**
* Set the owner property: The owner property.
*
* @param owner the owner value to set.
* @return the Path object itself.
*/
public Path setOwner(String owner) {
this.owner = owner;
return this;
}
/**
* Get the group property: The group property.
*
* @return the group value.
*/
public String getGroup() {
return this.group;
}
/**
* Set the group property: The group property.
*
* @param group the group value to set.
* @return the Path object itself.
*/
public Path setGroup(String group) {
this.group = group;
return this;
}
/**
* Get the permissions property: The permissions property.
*
* @return the permissions value.
*/
public String getPermissions() {
return this.permissions;
}
/**
* Set the permissions property: The permissions property.
*
* @param permissions the permissions value to set.
* @return the Path object itself.
*/
public Path setPermissions(String permissions) {
this.permissions = permissions;
return this;
}
/**
* Get the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @return the encryptionScope value.
*/
public String getEncryptionScope() {
return this.encryptionScope;
}
/**
* Set the encryptionScope property: The name of the encryption scope under which the blob is encrypted.
*
* @param encryptionScope the encryptionScope value to set.
* @return the Path object itself.
*/
public Path setEncryptionScope(String encryptionScope) {
this.encryptionScope = encryptionScope;
return this;
}
/**
* Get the creationTime property: The creationTime property.
*
* @return the creationTime value.
*/
public String getCreationTime() {
return this.creationTime;
}
/**
* Set the creationTime property: The creationTime property.
*
* @param creationTime the creationTime value to set.
* @return the Path object itself.
*/
public Path setCreationTime(String creationTime) {
this.creationTime = creationTime;
return this;
}
/**
* Get the expiryTime property: The expiryTime property.
*
* @return the expiryTime value.
*/
public String getExpiryTime() {
return this.expiryTime;
}
/**
* Set the expiryTime property: The expiryTime property.
*
* @param expiryTime the expiryTime value to set.
* @return the Path object itself.
*/
public Path setExpiryTime(String expiryTime) {
this.expiryTime = expiryTime;
return this;
}
/**
* Get the encryptionContext property: The EncryptionContext property.
*
* @return the encryptionContext value.
*/
public String getEncryptionContext() {
return this.encryptionContext;
}
/**
* Set the encryptionContext property: The EncryptionContext property.
*
* @param encryptionContext the encryptionContext value to set.
* @return the Path object itself.
*/
public Path setEncryptionContext(String encryptionContext) {
this.encryptionContext = encryptionContext;
return this;
}
/**
* Get the eTag property: The etag property.
*
* @return the eTag value.
*/
public String getETag() {
return this.eTag;
}
/**
* Set the eTag property: The etag property.
*
* @param eTag the eTag value to set.
* @return the Path object itself.
*/
public Path setETag(String eTag) {
this.eTag = eTag;
return this;
}
/**
* {@inheritDoc}
*/
@Override
/**
* Reads an instance of Path from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of Path if the JsonReader was pointing to an instance of it, or null if it was pointing to
* JSON null.
* @throws IOException IOException If an error occurs while reading the Path.
* @throws IllegalStateException If a token is not an allowed type.
*/
public static Path fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Path deserializedPath = new Path();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("name".equals(fieldName)) {
deserializedPath.name = reader.getString();
} else if ("isDirectory".equals(fieldName)) {
JsonToken token = reader.currentToken();
if (token == JsonToken.STRING) {
deserializedPath.isDirectory = Boolean.parseBoolean(reader.getString());
} else if (token == JsonToken.BOOLEAN) {
deserializedPath.isDirectory = reader.getBoolean();
} else if (token == JsonToken.NULL) {
deserializedPath.isDirectory = null;
} else {
throw new IllegalStateException(
"Invalid token, expected one of STRING, BOOLEAN, or NULL. Was " + token);
}
} else if ("lastModified".equals(fieldName)) {
deserializedPath.lastModified = reader.getString();
} else if ("contentLength".equals(fieldName)) {
JsonToken token = reader.currentToken();
if (token == JsonToken.STRING) {
deserializedPath.contentLength = Long.parseLong(reader.getString());
} else if (token == JsonToken.NUMBER) {
deserializedPath.contentLength = reader.getLong();
} else if (token == JsonToken.NULL) {
deserializedPath.contentLength = null;
} else {
throw new IllegalStateException(
"Invalid token, expected one of STRING, NUMBER, or NULL. Was " + token);
}
} else if ("owner".equals(fieldName)) {
deserializedPath.owner = reader.getString();
} else if ("group".equals(fieldName)) {
deserializedPath.group = reader.getString();
} else if ("permissions".equals(fieldName)) {
deserializedPath.permissions = reader.getString();
} else if ("EncryptionScope".equals(fieldName)) {
deserializedPath.encryptionScope = reader.getString();
} else if ("creationTime".equals(fieldName)) {
deserializedPath.creationTime = reader.getString();
} else if ("expiryTime".equals(fieldName)) {
deserializedPath.expiryTime = reader.getString();
} else if ("EncryptionContext".equals(fieldName)) {
deserializedPath.encryptionContext = reader.getString();
} else if ("etag".equals(fieldName)) {
deserializedPath.eTag = reader.getString();
} else {
reader.skipChildren();
}
}
return deserializedPath;
});
}
} |
Consider using `getTokenCredential` everywhere to reinforce the pattern | private void setup(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
if (interceptorManager.isPlaybackMode()) {
String endpoint = new ConfigurationClientCredentials(FAKE_CONNECTION_STRING).getBaseUri();
client = new ConfigurationClientBuilder()
.credential(new MockTokenCredential())
.endpoint(endpoint)
.httpClient(interceptorManager.getPlaybackClient())
.buildClient();
} else {
tokenCredential = TestHelper.getTokenCredential(interceptorManager);
String endpoint = Configuration.getGlobalConfiguration().get("AZ_CONFIG_ENDPOINT");
ConfigurationClientBuilder builder = new ConfigurationClientBuilder()
.endpoint(endpoint)
.credential(tokenCredential)
.serviceVersion(serviceVersion);
builder = setHttpClient(httpClient, builder);
if (interceptorManager.isRecordMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
client = builder.buildClient();
}
if (!interceptorManager.isLiveMode()) {
interceptorManager.removeSanitizers("AZSDK3447");
}
} | .endpoint(endpoint) | private void setup(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
if (interceptorManager.isPlaybackMode()) {
String endpoint = new ConfigurationClientCredentials(FAKE_CONNECTION_STRING).getBaseUri();
client = new ConfigurationClientBuilder()
.credential(TestHelper.getTokenCredential(interceptorManager))
.endpoint(endpoint)
.httpClient(interceptorManager.getPlaybackClient())
.buildClient();
} else {
tokenCredential = TestHelper.getTokenCredential(interceptorManager);
String endpoint = Configuration.getGlobalConfiguration().get("AZ_CONFIG_ENDPOINT");
ConfigurationClientBuilder builder = new ConfigurationClientBuilder()
.endpoint(endpoint)
.credential(tokenCredential)
.serviceVersion(serviceVersion);
builder = setHttpClient(httpClient, builder);
if (interceptorManager.isRecordMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
client = builder.buildClient();
}
if (!interceptorManager.isLiveMode()) {
interceptorManager.removeSanitizers("AZSDK3447");
}
} | class AadCredentialTest extends TestProxyTestBase {
private static ConfigurationClient client;
static String connectionString;
static TokenCredential tokenCredential;
ConfigurationClientBuilder setHttpClient(HttpClient httpClient, ConfigurationClientBuilder builder) {
if (interceptorManager.isPlaybackMode()) {
return builder.httpClient(buildSyncAssertingClient(interceptorManager.getPlaybackClient()));
}
return builder.httpClient(buildSyncAssertingClient(httpClient));
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.data.appconfiguration.TestHelper
public void aadAuthenticationAzConfigClient(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
setup(httpClient, serviceVersion);
final String key = "newKey";
final String value = "newValue";
ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value);
Assertions.assertEquals(addedSetting.getKey(), key);
Assertions.assertEquals(addedSetting.getValue(), value);
}
} | class AadCredentialTest extends TestProxyTestBase {
private static ConfigurationClient client;
static String connectionString;
static TokenCredential tokenCredential;
ConfigurationClientBuilder setHttpClient(HttpClient httpClient, ConfigurationClientBuilder builder) {
if (interceptorManager.isPlaybackMode()) {
return builder.httpClient(buildSyncAssertingClient(interceptorManager.getPlaybackClient()));
}
return builder.httpClient(buildSyncAssertingClient(httpClient));
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.data.appconfiguration.TestHelper
public void aadAuthenticationAzConfigClient(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
setup(httpClient, serviceVersion);
final String key = "newKey";
final String value = "newValue";
ConfigurationSetting addedSetting = client.setConfigurationSetting(key, null, value);
Assertions.assertEquals(addedSetting.getKey(), key);
Assertions.assertEquals(addedSetting.getValue(), value);
}
} |
do not add this. an example of only a List is not meaningful for user. | public void testListDataProducts() {
networkAnalyticsManager.dataProducts().list();
} | public void testListDataProducts() {
networkAnalyticsManager.dataProducts().list();
} | class NetworkAnalyticsManagerTests extends TestBase {
private NetworkAnalyticsManager networkAnalyticsManager = null;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
networkAnalyticsManager = NetworkAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
}
@Test
@DoNotRecord(skipInPlayback = true)
} | class NetworkAnalyticsManagerTests extends TestBase {
private NetworkAnalyticsManager networkAnalyticsManager = null;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
networkAnalyticsManager = NetworkAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
}
@Test
@LiveOnly
} | |
use BASIC | public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
networkAnalyticsManager = NetworkAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
} | .withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) | public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
networkAnalyticsManager = NetworkAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
} | class NetworkAnalyticsManagerTests extends TestBase {
private NetworkAnalyticsManager networkAnalyticsManager = null;
@Override
@Test
@DoNotRecord(skipInPlayback = true)
public void testListDataProducts() {
networkAnalyticsManager.dataProducts().list();
}
} | class NetworkAnalyticsManagerTests extends TestBase {
private NetworkAnalyticsManager networkAnalyticsManager = null;
@Override
@Test
@LiveOnly
public void testListDataProducts() {
networkAnalyticsManager.dataProducts().list();
}
} |
Fixed in the new version. | public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
networkAnalyticsManager = NetworkAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
} | .withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) | public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
networkAnalyticsManager = NetworkAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
} | class NetworkAnalyticsManagerTests extends TestBase {
private NetworkAnalyticsManager networkAnalyticsManager = null;
@Override
@Test
@DoNotRecord(skipInPlayback = true)
public void testListDataProducts() {
networkAnalyticsManager.dataProducts().list();
}
} | class NetworkAnalyticsManagerTests extends TestBase {
private NetworkAnalyticsManager networkAnalyticsManager = null;
@Override
@Test
@LiveOnly
public void testListDataProducts() {
networkAnalyticsManager.dataProducts().list();
}
} |
curious; how assert for `getPort()` passes for different port numbers (line_108, line_124) | public void propertiesAndPortSet() {
final String productName = "test-product";
final String clientVersion = "1.5.10";
final String scope = "test-scope";
final String fullyQualifiedNamespace = "host-name.com";
final SslDomain.VerifyMode verifyMode = SslDomain.VerifyMode.VERIFY_PEER;
final AmqpRetryOptions retryOptions = new AmqpRetryOptions();
final ClientOptions clientOptions = new ClientOptions();
final String actualHostname = "actual-host-name.com";
final int port = 1000;
final ConnectionOptions actual
= new ConnectionOptions(fullyQualifiedNamespace, tokenCredential, CbsAuthorizationType.JSON_WEB_TOKEN,
scope, AmqpTransportType.AMQP, retryOptions, ProxyOptions.SYSTEM_DEFAULTS, scheduler, clientOptions,
verifyMode, productName, clientVersion, actualHostname, port);
assertEquals(fullyQualifiedNamespace, actual.getFullyQualifiedNamespace());
assertEquals(ConnectionHandler.AMQPS_PORT, actual.getPort());
assertEquals(productName, actual.getProduct());
assertEquals(clientVersion, actual.getClientVersion());
assertSame(clientOptions, actual.getClientOptions());
assertEquals(AmqpTransportType.AMQP, actual.getTransportType());
assertEquals(scheduler, actual.getScheduler());
assertEquals(tokenCredential, actual.getTokenCredential());
assertEquals(CbsAuthorizationType.JSON_WEB_TOKEN, actual.getAuthorizationType());
assertEquals(scope, actual.getAuthorizationScope());
assertEquals(retryOptions, actual.getRetry());
assertEquals(verifyMode, actual.getSslVerifyMode());
assertEquals(actualHostname, actual.getHostname());
assertEquals(port, actual.getPort());
} | assertEquals(port, actual.getPort()); | public void propertiesAndPortSet() {
final String productName = "test-product";
final String clientVersion = "1.5.10";
final String scope = "test-scope";
final String fullyQualifiedNamespace = "host-name.com";
final SslDomain.VerifyMode verifyMode = SslDomain.VerifyMode.VERIFY_PEER;
final AmqpRetryOptions retryOptions = new AmqpRetryOptions();
final ClientOptions clientOptions = new ClientOptions();
final String actualHostname = "actual-host-name.com";
final int port = 1000;
final ConnectionOptions actual
= new ConnectionOptions(fullyQualifiedNamespace, tokenCredential, CbsAuthorizationType.JSON_WEB_TOKEN,
scope, AmqpTransportType.AMQP, retryOptions, ProxyOptions.SYSTEM_DEFAULTS, scheduler, clientOptions,
verifyMode, productName, clientVersion, actualHostname, port);
assertEquals(fullyQualifiedNamespace, actual.getFullyQualifiedNamespace());
assertEquals(productName, actual.getProduct());
assertEquals(clientVersion, actual.getClientVersion());
assertSame(clientOptions, actual.getClientOptions());
assertEquals(AmqpTransportType.AMQP, actual.getTransportType());
assertEquals(scheduler, actual.getScheduler());
assertEquals(tokenCredential, actual.getTokenCredential());
assertEquals(CbsAuthorizationType.JSON_WEB_TOKEN, actual.getAuthorizationType());
assertEquals(scope, actual.getAuthorizationScope());
assertEquals(retryOptions, actual.getRetry());
assertEquals(verifyMode, actual.getSslVerifyMode());
assertEquals(actualHostname, actual.getHostname());
assertEquals(port, actual.getPort());
} | class ConnectionOptionsTest {
@Mock
private TokenCredential tokenCredential;
@Mock
private Scheduler scheduler;
private AutoCloseable mocksCloseable;
@BeforeEach
public void beforeEach() {
mocksCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
public void afterEach() throws Exception {
if (mocksCloseable != null) {
mocksCloseable.close();
}
}
@Test
public void propertiesSet() {
final String productName = "test-product";
final String clientVersion = "1.5.10";
final String scope = "test-scope";
final String fullyQualifiedNamespace = "servicebus.windows.net";
final SslDomain.VerifyMode verifyMode = SslDomain.VerifyMode.VERIFY_PEER;
final AmqpRetryOptions retryOptions = new AmqpRetryOptions();
final ClientOptions clientOptions = new ClientOptions();
final ConnectionOptions actual = new ConnectionOptions(fullyQualifiedNamespace, tokenCredential,
CbsAuthorizationType.JSON_WEB_TOKEN, scope, AmqpTransportType.AMQP, retryOptions,
ProxyOptions.SYSTEM_DEFAULTS, scheduler, clientOptions, verifyMode, productName, clientVersion);
assertEquals(fullyQualifiedNamespace, actual.getHostname());
assertEquals(fullyQualifiedNamespace, actual.getFullyQualifiedNamespace());
assertEquals(ConnectionHandler.AMQPS_PORT, actual.getPort());
assertEquals(productName, actual.getProduct());
assertEquals(clientVersion, actual.getClientVersion());
assertSame(clientOptions, actual.getClientOptions());
assertEquals(AmqpTransportType.AMQP, actual.getTransportType());
assertEquals(scheduler, actual.getScheduler());
assertEquals(tokenCredential, actual.getTokenCredential());
assertEquals(CbsAuthorizationType.JSON_WEB_TOKEN, actual.getAuthorizationType());
assertEquals(scope, actual.getAuthorizationScope());
assertEquals(retryOptions, actual.getRetry());
assertEquals(verifyMode, actual.getSslVerifyMode());
}
/**
* Verifies that the correct port and properties are set.
*/
@Test
} | class ConnectionOptionsTest {
@Mock
private TokenCredential tokenCredential;
@Mock
private Scheduler scheduler;
private AutoCloseable mocksCloseable;
@BeforeEach
public void beforeEach() {
mocksCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
public void afterEach() throws Exception {
if (mocksCloseable != null) {
mocksCloseable.close();
}
}
@Test
public void propertiesSet() {
final String productName = "test-product";
final String clientVersion = "1.5.10";
final String scope = "test-scope";
final String fullyQualifiedNamespace = "servicebus.windows.net";
final SslDomain.VerifyMode verifyMode = SslDomain.VerifyMode.VERIFY_PEER;
final AmqpRetryOptions retryOptions = new AmqpRetryOptions();
final ClientOptions clientOptions = new ClientOptions();
final ConnectionOptions actual = new ConnectionOptions(fullyQualifiedNamespace, tokenCredential,
CbsAuthorizationType.JSON_WEB_TOKEN, scope, AmqpTransportType.AMQP, retryOptions,
ProxyOptions.SYSTEM_DEFAULTS, scheduler, clientOptions, verifyMode, productName, clientVersion);
assertEquals(fullyQualifiedNamespace, actual.getHostname());
assertEquals(fullyQualifiedNamespace, actual.getFullyQualifiedNamespace());
assertEquals(ConnectionHandler.AMQPS_PORT, actual.getPort());
assertEquals(productName, actual.getProduct());
assertEquals(clientVersion, actual.getClientVersion());
assertSame(clientOptions, actual.getClientOptions());
assertEquals(AmqpTransportType.AMQP, actual.getTransportType());
assertEquals(scheduler, actual.getScheduler());
assertEquals(tokenCredential, actual.getTokenCredential());
assertEquals(CbsAuthorizationType.JSON_WEB_TOKEN, actual.getAuthorizationType());
assertEquals(scope, actual.getAuthorizationScope());
assertEquals(retryOptions, actual.getRetry());
assertEquals(verifyMode, actual.getSslVerifyMode());
}
/**
* Verifies that the correct port and properties are set.
*/
@Test
} |
Lol. Good catch. ;) Also a test failure. | public void propertiesAndPortSet() {
final String productName = "test-product";
final String clientVersion = "1.5.10";
final String scope = "test-scope";
final String fullyQualifiedNamespace = "host-name.com";
final SslDomain.VerifyMode verifyMode = SslDomain.VerifyMode.VERIFY_PEER;
final AmqpRetryOptions retryOptions = new AmqpRetryOptions();
final ClientOptions clientOptions = new ClientOptions();
final String actualHostname = "actual-host-name.com";
final int port = 1000;
final ConnectionOptions actual
= new ConnectionOptions(fullyQualifiedNamespace, tokenCredential, CbsAuthorizationType.JSON_WEB_TOKEN,
scope, AmqpTransportType.AMQP, retryOptions, ProxyOptions.SYSTEM_DEFAULTS, scheduler, clientOptions,
verifyMode, productName, clientVersion, actualHostname, port);
assertEquals(fullyQualifiedNamespace, actual.getFullyQualifiedNamespace());
assertEquals(ConnectionHandler.AMQPS_PORT, actual.getPort());
assertEquals(productName, actual.getProduct());
assertEquals(clientVersion, actual.getClientVersion());
assertSame(clientOptions, actual.getClientOptions());
assertEquals(AmqpTransportType.AMQP, actual.getTransportType());
assertEquals(scheduler, actual.getScheduler());
assertEquals(tokenCredential, actual.getTokenCredential());
assertEquals(CbsAuthorizationType.JSON_WEB_TOKEN, actual.getAuthorizationType());
assertEquals(scope, actual.getAuthorizationScope());
assertEquals(retryOptions, actual.getRetry());
assertEquals(verifyMode, actual.getSslVerifyMode());
assertEquals(actualHostname, actual.getHostname());
assertEquals(port, actual.getPort());
} | assertEquals(port, actual.getPort()); | public void propertiesAndPortSet() {
final String productName = "test-product";
final String clientVersion = "1.5.10";
final String scope = "test-scope";
final String fullyQualifiedNamespace = "host-name.com";
final SslDomain.VerifyMode verifyMode = SslDomain.VerifyMode.VERIFY_PEER;
final AmqpRetryOptions retryOptions = new AmqpRetryOptions();
final ClientOptions clientOptions = new ClientOptions();
final String actualHostname = "actual-host-name.com";
final int port = 1000;
final ConnectionOptions actual
= new ConnectionOptions(fullyQualifiedNamespace, tokenCredential, CbsAuthorizationType.JSON_WEB_TOKEN,
scope, AmqpTransportType.AMQP, retryOptions, ProxyOptions.SYSTEM_DEFAULTS, scheduler, clientOptions,
verifyMode, productName, clientVersion, actualHostname, port);
assertEquals(fullyQualifiedNamespace, actual.getFullyQualifiedNamespace());
assertEquals(productName, actual.getProduct());
assertEquals(clientVersion, actual.getClientVersion());
assertSame(clientOptions, actual.getClientOptions());
assertEquals(AmqpTransportType.AMQP, actual.getTransportType());
assertEquals(scheduler, actual.getScheduler());
assertEquals(tokenCredential, actual.getTokenCredential());
assertEquals(CbsAuthorizationType.JSON_WEB_TOKEN, actual.getAuthorizationType());
assertEquals(scope, actual.getAuthorizationScope());
assertEquals(retryOptions, actual.getRetry());
assertEquals(verifyMode, actual.getSslVerifyMode());
assertEquals(actualHostname, actual.getHostname());
assertEquals(port, actual.getPort());
} | class ConnectionOptionsTest {
@Mock
private TokenCredential tokenCredential;
@Mock
private Scheduler scheduler;
private AutoCloseable mocksCloseable;
@BeforeEach
public void beforeEach() {
mocksCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
public void afterEach() throws Exception {
if (mocksCloseable != null) {
mocksCloseable.close();
}
}
@Test
public void propertiesSet() {
final String productName = "test-product";
final String clientVersion = "1.5.10";
final String scope = "test-scope";
final String fullyQualifiedNamespace = "servicebus.windows.net";
final SslDomain.VerifyMode verifyMode = SslDomain.VerifyMode.VERIFY_PEER;
final AmqpRetryOptions retryOptions = new AmqpRetryOptions();
final ClientOptions clientOptions = new ClientOptions();
final ConnectionOptions actual = new ConnectionOptions(fullyQualifiedNamespace, tokenCredential,
CbsAuthorizationType.JSON_WEB_TOKEN, scope, AmqpTransportType.AMQP, retryOptions,
ProxyOptions.SYSTEM_DEFAULTS, scheduler, clientOptions, verifyMode, productName, clientVersion);
assertEquals(fullyQualifiedNamespace, actual.getHostname());
assertEquals(fullyQualifiedNamespace, actual.getFullyQualifiedNamespace());
assertEquals(ConnectionHandler.AMQPS_PORT, actual.getPort());
assertEquals(productName, actual.getProduct());
assertEquals(clientVersion, actual.getClientVersion());
assertSame(clientOptions, actual.getClientOptions());
assertEquals(AmqpTransportType.AMQP, actual.getTransportType());
assertEquals(scheduler, actual.getScheduler());
assertEquals(tokenCredential, actual.getTokenCredential());
assertEquals(CbsAuthorizationType.JSON_WEB_TOKEN, actual.getAuthorizationType());
assertEquals(scope, actual.getAuthorizationScope());
assertEquals(retryOptions, actual.getRetry());
assertEquals(verifyMode, actual.getSslVerifyMode());
}
/**
* Verifies that the correct port and properties are set.
*/
@Test
} | class ConnectionOptionsTest {
@Mock
private TokenCredential tokenCredential;
@Mock
private Scheduler scheduler;
private AutoCloseable mocksCloseable;
@BeforeEach
public void beforeEach() {
mocksCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
public void afterEach() throws Exception {
if (mocksCloseable != null) {
mocksCloseable.close();
}
}
@Test
public void propertiesSet() {
final String productName = "test-product";
final String clientVersion = "1.5.10";
final String scope = "test-scope";
final String fullyQualifiedNamespace = "servicebus.windows.net";
final SslDomain.VerifyMode verifyMode = SslDomain.VerifyMode.VERIFY_PEER;
final AmqpRetryOptions retryOptions = new AmqpRetryOptions();
final ClientOptions clientOptions = new ClientOptions();
final ConnectionOptions actual = new ConnectionOptions(fullyQualifiedNamespace, tokenCredential,
CbsAuthorizationType.JSON_WEB_TOKEN, scope, AmqpTransportType.AMQP, retryOptions,
ProxyOptions.SYSTEM_DEFAULTS, scheduler, clientOptions, verifyMode, productName, clientVersion);
assertEquals(fullyQualifiedNamespace, actual.getHostname());
assertEquals(fullyQualifiedNamespace, actual.getFullyQualifiedNamespace());
assertEquals(ConnectionHandler.AMQPS_PORT, actual.getPort());
assertEquals(productName, actual.getProduct());
assertEquals(clientVersion, actual.getClientVersion());
assertSame(clientOptions, actual.getClientOptions());
assertEquals(AmqpTransportType.AMQP, actual.getTransportType());
assertEquals(scheduler, actual.getScheduler());
assertEquals(tokenCredential, actual.getTokenCredential());
assertEquals(CbsAuthorizationType.JSON_WEB_TOKEN, actual.getAuthorizationType());
assertEquals(scope, actual.getAuthorizationScope());
assertEquals(retryOptions, actual.getRetry());
assertEquals(verifyMode, actual.getSslVerifyMode());
}
/**
* Verifies that the correct port and properties are set.
*/
@Test
} |
(in case there really are no errors) ```suggestion Set<ResponseError> responseErrors; try { return parseErrors(body); } catch (IllegalStateException e) { return singleton("Could not parse response"); } ``` | public Set<String> getErrorMessages() {
Set<ResponseError> responseErrors = getErrors();
if (responseErrors.isEmpty()) {
return singleton("Could not parse response");
}
return responseErrors.stream().map(ResponseError::getMessage).collect(Collectors.toSet());
} | } | public Set<String> getErrorMessages() {
Set<ResponseError> responseErrors;
try {
responseErrors = parseErrors(body);
} catch (IllegalStateException e) {
return singleton("Could not parse response");
}
return responseErrors.stream().map(ResponseError::getMessage).collect(Collectors.toSet());
} | class TelemetryPipelineResponse {
private static final String INVALID_INSTRUMENTATION_KEY = "Invalid instrumentation key";
private final int statusCode;
private final String body;
TelemetryPipelineResponse(int statusCode, String body) {
this.statusCode = statusCode;
this.body = body;
}
public int getStatusCode() {
return statusCode;
}
public String getBody() {
return body;
}
public Set<ResponseError> getErrors() {
try {
return parseErrors(body);
} catch (IllegalStateException e) {
return emptySet();
}
}
public boolean isInvalidInstrumentationKey() {
Set<String> errors = getErrorMessages();
return errors != null && errors.contains(INVALID_INSTRUMENTATION_KEY);
}
static Set<ResponseError> parseErrors(String body) {
JsonNode jsonNode;
try {
jsonNode = new ObjectMapper().readTree(body);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Failed to parse response body", e);
}
List<JsonNode> errorNodes = new ArrayList<>();
jsonNode.get("errors").forEach(errorNodes::add);
return errorNodes.stream()
.map(errorNode -> new ResponseError(errorNode.get("index").asInt(), errorNode.get("statusCode").asInt(), errorNode.get("message").asText()))
.filter(s -> !s.getMessage().equals("Telemetry sampled out."))
.collect(Collectors.toSet());
}
} | class TelemetryPipelineResponse {
private static final String INVALID_INSTRUMENTATION_KEY = "Invalid instrumentation key";
private final int statusCode;
private final String body;
TelemetryPipelineResponse(int statusCode, String body) {
this.statusCode = statusCode;
this.body = body;
}
public int getStatusCode() {
return statusCode;
}
public String getBody() {
return body;
}
public Set<ResponseError> getErrors() {
try {
return parseErrors(body);
} catch (IllegalStateException e) {
return emptySet();
}
}
public boolean isInvalidInstrumentationKey() {
Set<String> errors = getErrorMessages();
return errors != null && errors.contains(INVALID_INSTRUMENTATION_KEY);
}
static Set<ResponseError> parseErrors(String body) {
JsonNode jsonNode;
try {
jsonNode = new ObjectMapper().readTree(body);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Failed to parse response body", e);
}
List<JsonNode> errorNodes = new ArrayList<>();
jsonNode.get("errors").forEach(errorNodes::add);
return errorNodes.stream()
.map(errorNode -> new ResponseError(errorNode.get("index").asInt(), errorNode.get("statusCode").asInt(), errorNode.get("message").asText()))
.filter(s -> !s.getMessage().equals("Telemetry sampled out."))
.collect(Collectors.toSet());
}
} |
suspect the linter will catch this but if not. ```suggestion || (clientSecret != null && clientCertificatePath != null) ``` | public OnBehalfOfCredential build() {
ValidationUtil.validate(CLASS_NAME, LOGGER, "clientId", clientId, "tenantId", tenantId);
if ((clientSecret == null && clientCertificatePath == null && clientAssertionSupplier == null)
||(clientSecret != null && clientCertificatePath != null)
|| (clientSecret != null && clientAssertionSupplier != null)
|| (clientCertificatePath != null && clientAssertionSupplier != null)) {
throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("Exactly one of client secret, "
+ "client certificate path, or client assertion supplier must be provided "
+ "in OnBehalfOfCredentialBuilder."));
}
return new OnBehalfOfCredential(clientId, tenantId, clientSecret, clientCertificatePath,
clientCertificatePassword, clientAssertionSupplier, identityClientOptions);
} | ||(clientSecret != null && clientCertificatePath != null) | public OnBehalfOfCredential build() {
ValidationUtil.validate(CLASS_NAME, LOGGER, "clientId", clientId, "tenantId", tenantId);
if ((clientSecret == null && clientCertificatePath == null && clientAssertionSupplier == null)
|| (clientSecret != null && clientCertificatePath != null)
|| (clientSecret != null && clientAssertionSupplier != null)
|| (clientCertificatePath != null && clientAssertionSupplier != null)) {
throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("Exactly one of client secret, "
+ "client certificate path, or client assertion supplier must be provided "
+ "in OnBehalfOfCredentialBuilder."));
}
return new OnBehalfOfCredential(clientId, tenantId, clientSecret, clientCertificatePath,
clientCertificatePassword, clientAssertionSupplier, identityClientOptions);
} | class OnBehalfOfCredentialBuilder extends AadCredentialBuilderBase<OnBehalfOfCredentialBuilder> {
private static final ClientLogger LOGGER = new ClientLogger(OnBehalfOfCredentialBuilder.class);
private static final String CLASS_NAME = OnBehalfOfCredentialBuilder.class.getSimpleName();
private String clientSecret;
private String clientCertificatePath;
private String clientCertificatePassword;
private Supplier<String> clientAssertionSupplier;
/**
* Constructs an instance of OnBehalfOfCredentialBuilder.
*/
public OnBehalfOfCredentialBuilder() {
super();
}
/**
* Sets the client secret for the authentication.
* @param clientSecret the secret value of the Microsoft Entra application.
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder clientSecret(String clientSecret) {
this.clientSecret = clientSecret;
return this;
}
/**
* Configures the persistent shared token cache options and enables the persistent token cache which is disabled
* by default. If configured, the credential will store tokens in a cache persisted to the machine, protected to
* the current user, which can be shared by other credentials and processes.
*
* @param tokenCachePersistenceOptions the token cache configuration options
* @return An updated instance of this builder with the token cache options configured.
*/
public OnBehalfOfCredentialBuilder tokenCachePersistenceOptions(TokenCachePersistenceOptions
tokenCachePersistenceOptions) {
this.identityClientOptions.setTokenCacheOptions(tokenCachePersistenceOptions);
return this;
}
/**
* Sets the path of the PEM certificate for authenticating to Microsoft Entra ID.
*
* @param pemCertificatePath the PEM file containing the certificate
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder pemCertificate(String pemCertificatePath) {
this.clientCertificatePath = pemCertificatePath;
return this;
}
/**
* Sets the path and password of the PFX certificate for authenticating to Microsoft Entra ID.
*
* @param pfxCertificatePath the password protected PFX file containing the certificate
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder pfxCertificate(String pfxCertificatePath) {
this.clientCertificatePath = pfxCertificatePath;
return this;
}
/**
* Sets the password of the client certificate for authenticating to Microsoft Entra ID.
*
* @param clientCertificatePassword the password protecting the certificate
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder clientCertificatePassword(String clientCertificatePassword) {
this.clientCertificatePassword = clientCertificatePassword;
return this;
}
/**
* Specifies if the x5c claim (public key of the certificate) should be sent as part of the authentication request
* and enable subject name / issuer based authentication. The default value is false.
*
* @param sendCertificateChain the flag to indicate if certificate chain should be sent as part of authentication
* request.
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder sendCertificateChain(boolean sendCertificateChain) {
this.identityClientOptions.setIncludeX5c(sendCertificateChain);
return this;
}
/**
* Configure the User Assertion Scope to be used for OnBehalfOf Authentication request.
*
* @param userAssertion the user assertion access token to be used for On behalf Of authentication flow
* @return An updated instance of this builder with the user assertion scope configured.
*/
public OnBehalfOfCredentialBuilder userAssertion(String userAssertion) {
this.identityClientOptions.userAssertion(userAssertion);
return this;
}
/**
* Sets the supplier containing the logic to supply the client assertion when invoked.
*
* @param clientAssertionSupplier the supplier supplying client assertion.
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder clientAssertion(Supplier<String> clientAssertionSupplier) {
this.clientAssertionSupplier = clientAssertionSupplier;
return this;
}
/**
* Creates a new {@link OnBehalfOfCredential} with the current configurations.
*
* @return a {@link OnBehalfOfCredential} with the current configurations.
* @throws IllegalArgumentException if eiter both the client secret and certificate are configured or none of them
* are configured.
*/
} | class OnBehalfOfCredentialBuilder extends AadCredentialBuilderBase<OnBehalfOfCredentialBuilder> {
private static final ClientLogger LOGGER = new ClientLogger(OnBehalfOfCredentialBuilder.class);
private static final String CLASS_NAME = OnBehalfOfCredentialBuilder.class.getSimpleName();
private String clientSecret;
private String clientCertificatePath;
private String clientCertificatePassword;
private Supplier<String> clientAssertionSupplier;
/**
* Constructs an instance of OnBehalfOfCredentialBuilder.
*/
public OnBehalfOfCredentialBuilder() {
super();
}
/**
* Sets the client secret for the authentication.
* @param clientSecret the secret value of the Microsoft Entra application.
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder clientSecret(String clientSecret) {
this.clientSecret = clientSecret;
return this;
}
/**
* Configures the persistent shared token cache options and enables the persistent token cache which is disabled
* by default. If configured, the credential will store tokens in a cache persisted to the machine, protected to
* the current user, which can be shared by other credentials and processes.
*
* @param tokenCachePersistenceOptions the token cache configuration options
* @return An updated instance of this builder with the token cache options configured.
*/
public OnBehalfOfCredentialBuilder tokenCachePersistenceOptions(TokenCachePersistenceOptions
tokenCachePersistenceOptions) {
this.identityClientOptions.setTokenCacheOptions(tokenCachePersistenceOptions);
return this;
}
/**
* Sets the path of the PEM certificate for authenticating to Microsoft Entra ID.
*
* @param pemCertificatePath the PEM file containing the certificate
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder pemCertificate(String pemCertificatePath) {
this.clientCertificatePath = pemCertificatePath;
return this;
}
/**
* Sets the path and password of the PFX certificate for authenticating to Microsoft Entra ID.
*
* @param pfxCertificatePath the password protected PFX file containing the certificate
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder pfxCertificate(String pfxCertificatePath) {
this.clientCertificatePath = pfxCertificatePath;
return this;
}
/**
* Sets the password of the client certificate for authenticating to Microsoft Entra ID.
*
* @param clientCertificatePassword the password protecting the certificate
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder clientCertificatePassword(String clientCertificatePassword) {
this.clientCertificatePassword = clientCertificatePassword;
return this;
}
/**
* Specifies if the x5c claim (public key of the certificate) should be sent as part of the authentication request
* and enable subject name / issuer based authentication. The default value is false.
*
* @param sendCertificateChain the flag to indicate if certificate chain should be sent as part of authentication
* request.
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder sendCertificateChain(boolean sendCertificateChain) {
this.identityClientOptions.setIncludeX5c(sendCertificateChain);
return this;
}
/**
* Configure the User Assertion Scope to be used for OnBehalfOf Authentication request.
*
* @param userAssertion the user assertion access token to be used for On behalf Of authentication flow
* @return An updated instance of this builder with the user assertion scope configured.
*/
public OnBehalfOfCredentialBuilder userAssertion(String userAssertion) {
this.identityClientOptions.userAssertion(userAssertion);
return this;
}
/**
* Sets the supplier containing the logic to supply the client assertion when invoked.
*
* @param clientAssertionSupplier the supplier supplying client assertion.
* @return An updated instance of this builder.
*/
public OnBehalfOfCredentialBuilder clientAssertion(Supplier<String> clientAssertionSupplier) {
this.clientAssertionSupplier = clientAssertionSupplier;
return this;
}
/**
* Creates a new {@link OnBehalfOfCredential} with the current configurations.
*
* @return a {@link OnBehalfOfCredential} with the current configurations.
* @throws IllegalArgumentException if eiter both the client secret and certificate are configured or none of them
* are configured.
*/
} |
Is it required? | public void testCreateApiCenterService() {
Service service = null;
try {
String serviceName = "service" + randomPadding();
service = apiCenterManager.services()
.define(serviceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withIdentity(new ManagedServiceIdentity().withType(ManagedServiceIdentityType.SYSTEM_ASSIGNED))
.withProperties(new ServiceProperties())
.create();
service.refresh();
Assertions.assertEquals(serviceName, service.name());
Assertions.assertEquals(serviceName, apiCenterManager.services().getById(service.id()).name());
Assertions.assertTrue(apiCenterManager.services().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (service != null) {
apiCenterManager.services().deleteById(service.id());
}
}
} | .withProperties(new ServiceProperties()) | public void testCreateApiCenterService() {
Service service = null;
try {
String serviceName = "service" + randomPadding();
service = apiCenterManager.services()
.define(serviceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.create();
service.refresh();
Assertions.assertEquals(serviceName, service.name());
Assertions.assertEquals(serviceName, apiCenterManager.services().getById(service.id()).name());
Assertions.assertTrue(apiCenterManager.services().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (service != null) {
apiCenterManager.services().deleteById(service.id());
}
}
} | class ApiCenterManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ApiCenterManager apiCenterManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
apiCenterManager = ApiCenterManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@LiveOnly
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ApiCenterManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ApiCenterManager apiCenterManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
apiCenterManager = ApiCenterManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@LiveOnly
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Fixed in the new version. | public void testCreateApiCenterService() {
Service service = null;
try {
String serviceName = "service" + randomPadding();
service = apiCenterManager.services()
.define(serviceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withIdentity(new ManagedServiceIdentity().withType(ManagedServiceIdentityType.SYSTEM_ASSIGNED))
.withProperties(new ServiceProperties())
.create();
service.refresh();
Assertions.assertEquals(serviceName, service.name());
Assertions.assertEquals(serviceName, apiCenterManager.services().getById(service.id()).name());
Assertions.assertTrue(apiCenterManager.services().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (service != null) {
apiCenterManager.services().deleteById(service.id());
}
}
} | .withProperties(new ServiceProperties()) | public void testCreateApiCenterService() {
Service service = null;
try {
String serviceName = "service" + randomPadding();
service = apiCenterManager.services()
.define(serviceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.create();
service.refresh();
Assertions.assertEquals(serviceName, service.name());
Assertions.assertEquals(serviceName, apiCenterManager.services().getById(service.id()).name());
Assertions.assertTrue(apiCenterManager.services().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (service != null) {
apiCenterManager.services().deleteById(service.id());
}
}
} | class ApiCenterManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ApiCenterManager apiCenterManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
apiCenterManager = ApiCenterManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@LiveOnly
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ApiCenterManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ApiCenterManager apiCenterManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
apiCenterManager = ApiCenterManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@LiveOnly
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
I am wondering should we use block() here? | public void createContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("lowerName", "SELECT VALUE LOWER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
database.createContainer(containerProperties).subscribe();
} | database.createContainer(containerProperties).subscribe(); | public void createContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("lowerName", "SELECT VALUE LOWER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
database.createContainer(containerProperties).subscribe();
} | class ComputedPropertiesCodeSnippet {
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer container;
private String containerName = "TestContainer";
public ComputedPropertiesCodeSnippet() {
this.client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.consistencyLevel(ConsistencyLevel.SESSION)
.buildAsyncClient();
this.database = this.client.getDatabase("TestDB");
}
public void replaceContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("upperName", "SELECT VALUE UPPER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
container = database.getContainer(containerName);
container.replace(containerProperties).subscribe();
}
public void replaceContainerWithExistingComputedProperties() {
container = database.getContainer(containerName);
CosmosContainerProperties modifiedProperties = container.read().block().getProperties();
Collection<ComputedProperty> modifiedComputedProperties = modifiedProperties.getComputedProperties();
modifiedComputedProperties.add(new ComputedProperty("upperName", "SELECT VALUE UPPER(c.firstName) FROM c"));
modifiedProperties.setComputedProperties(modifiedComputedProperties);
container.replace(modifiedProperties).subscribe();
}
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
} | class ComputedPropertiesCodeSnippet {
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer container;
private String containerName = "TestContainer";
public ComputedPropertiesCodeSnippet() {
this.client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.consistencyLevel(ConsistencyLevel.SESSION)
.buildAsyncClient();
this.database = this.client.getDatabase("TestDB");
}
public void replaceContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("upperName", "SELECT VALUE UPPER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
container = database.getContainer(containerName);
container.replace(containerProperties).subscribe();
}
public void replaceContainerWithExistingComputedProperties() {
container = database.getContainer(containerName);
CosmosContainerProperties modifiedProperties = container.read().block().getProperties();
Collection<ComputedProperty> modifiedComputedProperties = modifiedProperties.getComputedProperties();
modifiedComputedProperties.add(new ComputedProperty("upperName", "SELECT VALUE UPPER(c.firstName) FROM c"));
modifiedProperties.setComputedProperties(modifiedComputedProperties);
container.replace(modifiedProperties).subscribe();
}
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
} |
+ 1 below as well | public void createContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("lowerName", "SELECT VALUE LOWER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
database.createContainer(containerProperties).subscribe();
} | database.createContainer(containerProperties).subscribe(); | public void createContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("lowerName", "SELECT VALUE LOWER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
database.createContainer(containerProperties).subscribe();
} | class ComputedPropertiesCodeSnippet {
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer container;
private String containerName = "TestContainer";
public ComputedPropertiesCodeSnippet() {
this.client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.consistencyLevel(ConsistencyLevel.SESSION)
.buildAsyncClient();
this.database = this.client.getDatabase("TestDB");
}
public void replaceContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("upperName", "SELECT VALUE UPPER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
container = database.getContainer(containerName);
container.replace(containerProperties).subscribe();
}
public void replaceContainerWithExistingComputedProperties() {
container = database.getContainer(containerName);
CosmosContainerProperties modifiedProperties = container.read().block().getProperties();
Collection<ComputedProperty> modifiedComputedProperties = modifiedProperties.getComputedProperties();
modifiedComputedProperties.add(new ComputedProperty("upperName", "SELECT VALUE UPPER(c.firstName) FROM c"));
modifiedProperties.setComputedProperties(modifiedComputedProperties);
container.replace(modifiedProperties).subscribe();
}
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
} | class ComputedPropertiesCodeSnippet {
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer container;
private String containerName = "TestContainer";
public ComputedPropertiesCodeSnippet() {
this.client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.consistencyLevel(ConsistencyLevel.SESSION)
.buildAsyncClient();
this.database = this.client.getDatabase("TestDB");
}
public void replaceContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("upperName", "SELECT VALUE UPPER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
container = database.getContainer(containerName);
container.replace(containerProperties).subscribe();
}
public void replaceContainerWithExistingComputedProperties() {
container = database.getContainer(containerName);
CosmosContainerProperties modifiedProperties = container.read().block().getProperties();
Collection<ComputedProperty> modifiedComputedProperties = modifiedProperties.getComputedProperties();
modifiedComputedProperties.add(new ComputedProperty("upperName", "SELECT VALUE UPPER(c.firstName) FROM c"));
modifiedProperties.setComputedProperties(modifiedComputedProperties);
container.replace(modifiedProperties).subscribe();
}
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
} |
We can, but since this is just code snippet, I don't want customers to learn bad coding practices for async | public void createContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("lowerName", "SELECT VALUE LOWER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
database.createContainer(containerProperties).subscribe();
} | database.createContainer(containerProperties).subscribe(); | public void createContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("lowerName", "SELECT VALUE LOWER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
database.createContainer(containerProperties).subscribe();
} | class ComputedPropertiesCodeSnippet {
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer container;
private String containerName = "TestContainer";
public ComputedPropertiesCodeSnippet() {
this.client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.consistencyLevel(ConsistencyLevel.SESSION)
.buildAsyncClient();
this.database = this.client.getDatabase("TestDB");
}
public void replaceContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("upperName", "SELECT VALUE UPPER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
container = database.getContainer(containerName);
container.replace(containerProperties).subscribe();
}
public void replaceContainerWithExistingComputedProperties() {
container = database.getContainer(containerName);
CosmosContainerProperties modifiedProperties = container.read().block().getProperties();
Collection<ComputedProperty> modifiedComputedProperties = modifiedProperties.getComputedProperties();
modifiedComputedProperties.add(new ComputedProperty("upperName", "SELECT VALUE UPPER(c.firstName) FROM c"));
modifiedProperties.setComputedProperties(modifiedComputedProperties);
container.replace(modifiedProperties).subscribe();
}
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
} | class ComputedPropertiesCodeSnippet {
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer container;
private String containerName = "TestContainer";
public ComputedPropertiesCodeSnippet() {
this.client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.consistencyLevel(ConsistencyLevel.SESSION)
.buildAsyncClient();
this.database = this.client.getDatabase("TestDB");
}
public void replaceContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("upperName", "SELECT VALUE UPPER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
container = database.getContainer(containerName);
container.replace(containerProperties).subscribe();
}
public void replaceContainerWithExistingComputedProperties() {
container = database.getContainer(containerName);
CosmosContainerProperties modifiedProperties = container.read().block().getProperties();
Collection<ComputedProperty> modifiedComputedProperties = modifiedProperties.getComputedProperties();
modifiedComputedProperties.add(new ComputedProperty("upperName", "SELECT VALUE UPPER(c.firstName) FROM c"));
modifiedProperties.setComputedProperties(modifiedComputedProperties);
container.replace(modifiedProperties).subscribe();
}
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
} |
Not related to this change as this logic is being copied. Couldn't this be computed ahead of time instead of each time this API is called? | String getObjectName() {
String[] pathParts = getObjectPath().split("/");
return pathParts[pathParts.length - 1];
} | String[] pathParts = getObjectPath().split("/"); | String getObjectName() {
return this.objectName;
} | class DataLakePathClient {
private static final ClientLogger LOGGER = new ClientLogger(DataLakePathClient.class);
final DataLakePathAsyncClient dataLakePathAsyncClient;
final BlockBlobClient blockBlobClient;
final AzureDataLakeStorageRestAPIImpl dataLakeStorage;
final AzureDataLakeStorageRestAPIImpl fileSystemDataLakeStorage;
final AzureDataLakeStorageRestAPIImpl blobDataLakeStorage;
private final String accountName;
private final String fileSystemName;
final String pathName;
private final DataLakeServiceVersion serviceVersion;
private final CpkInfo customerProvidedKey;
final PathResourceType pathResourceType;
private final AzureSasCredential sasToken;
private final boolean isTokenCredentialAuthenticated;
DataLakePathClient(DataLakePathAsyncClient dataLakePathAsyncClient, BlockBlobClient blockBlobClient,
HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName,
String fileSystemName, String pathName, PathResourceType pathResourceType, AzureSasCredential sasToken,
CpkInfo customerProvidedKey, boolean isTokenCredentialAuthenticated) {
this.dataLakePathAsyncClient = dataLakePathAsyncClient;
this.blockBlobClient = blockBlobClient;
this.accountName = accountName;
this.fileSystemName = fileSystemName;
this.pathName = pathName;
this.pathResourceType = pathResourceType;
this.sasToken = sasToken;
this.dataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
.pipeline(pipeline)
.url(url)
.fileSystem(fileSystemName)
.path(this.pathName)
.version(serviceVersion.getVersion())
.buildClient();
this.serviceVersion = serviceVersion;
String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(url, "blob", "dfs");
this.blobDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
.pipeline(pipeline)
.url(blobUrl)
.fileSystem(fileSystemName)
.path(this.pathName)
.version(serviceVersion.getVersion())
.buildClient();
this.fileSystemDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
.pipeline(pipeline)
.url(url)
.fileSystem(fileSystemName)
.version(serviceVersion.getVersion())
.buildClient();
this.customerProvidedKey = customerProvidedKey;
this.isTokenCredentialAuthenticated = isTokenCredentialAuthenticated;
}
/**
* Gets the URL of the storage account.
*
* @return the URL.
*/
String getAccountUrl() {
return dataLakeStorage.getUrl();
}
/**
* Gets the URL of the object represented by this client on the Data Lake service.
*
* @return the URL.
*/
String getPathUrl() {
return dataLakeStorage.getUrl() + "/" + fileSystemName + "/" + Utility.urlEncode(pathName);
}
/**
* Gets the associated account name.
*
* @return Account name associated with this storage resource.
*/
public String getAccountName() {
return accountName;
}
/**
* Gets the name of the File System in which this object lives.
*
* @return The name of the File System.
*/
public String getFileSystemName() {
return fileSystemName;
}
/**
* Gets the path of this object, not including the name of the resource itself.
*
* @return The path of the object.
*/
String getObjectPath() {
return pathName;
}
/**
* Gets the name of this object, not including its full path.
*
* @return The name of the object.
*/
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return dataLakeStorage.getHttpPipeline();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public DataLakeServiceVersion getServiceVersion() {
return serviceVersion;
}
AzureSasCredential getSasToken() {
return this.sasToken;
}
/**
* Gets the {@link CpkInfo} used to encrypt this path's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CustomerProvidedKey getCustomerProvidedKey() {
return new CustomerProvidedKey(customerProvidedKey.getEncryptionKey());
}
CpkInfo getCpkInfo() {
return this.customerProvidedKey;
}
boolean isTokenCredentialAuthenticated() {
return this.isTokenCredentialAuthenticated;
}
/**
* Creates a new {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the path,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
*/
public DataLakePathClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
CpkInfo finalCustomerProvidedKey = null;
if (customerProvidedKey != null) {
finalCustomerProvidedKey = new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
}
return new DataLakePathClient(dataLakePathAsyncClient,
blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)),
getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
getFileSystemName(), getObjectPath(), this.pathResourceType, getSasToken(),
finalCustomerProvidedKey, isTokenCredentialAuthenticated());
}
/**
* Creates a resource. By default, this method will not overwrite an existing path.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.create -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.create -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create() {
return create(false);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.create
* <pre>
* boolean overwrite = true;
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.create
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param overwrite Whether to overwrite, should data exist on the path.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create(boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return createWithResponse(new DataLakePathCreateOptions().setRequestConditions(requestConditions), null, Context.NONE).getValue();
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* String permissions = "permissions";
* String umask = "umask";
*
* Response<PathInfo> response = client.createWithResponse&
* Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param permissions POSIX access permissions for the resource owner, the resource owning group, and others.
* @param umask Restricts permissions of the resource to be created.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createWithResponse(String permissions, String umask, PathHttpHeaders headers,
Map<String, String> metadata, DataLakeRequestConditions requestConditions, Duration timeout,
Context context) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setPermissions(permissions)
.setUmask(umask)
.setPathHttpHeaders(headers)
.setMetadata(metadata)
.setRequestConditions(requestConditions);
return createWithResponse(options, timeout, context);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Map<String, String> metadata = Collections.singletonMap&
* String permissions = "permissions";
* String umask = "umask";
* String owner = "rwx";
* String group = "r--";
* String leaseId = CoreUtils.randomUuid&
* Integer duration = 15;
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setOwner&
* .setGroup&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setMetadata&
* .setProposedLeaseId&
* .setLeaseDuration&
*
* Response<PathInfo> response = client.createWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathCreateOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createWithResponse(DataLakePathCreateOptions options, Duration timeout, Context context) {
DataLakePathCreateOptions finalOptions = options == null ? new DataLakePathCreateOptions() : options;
DataLakeRequestConditions requestConditions = options.getRequestConditions() == null ? new DataLakeRequestConditions() : options.getRequestConditions();
LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
ModifiedAccessConditions mac = new ModifiedAccessConditions()
.setIfMatch(requestConditions.getIfMatch())
.setIfNoneMatch(requestConditions.getIfNoneMatch())
.setIfModifiedSince(requestConditions.getIfModifiedSince())
.setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
String acl = options.getAccessControlList() != null ? PathAccessControlEntry
.serializeList(options.getAccessControlList()) : null;
PathExpiryOptions expiryOptions = ModelHelper.setFieldsIfNull(options, pathResourceType);
String expiresOnString = null;
if (options.getScheduleDeletionOptions() != null && options.getScheduleDeletionOptions().getExpiresOn() != null) {
expiresOnString = DateTimeRfc1123.toRfc1123String(options.getScheduleDeletionOptions().getExpiresOn());
} else if (options.getScheduleDeletionOptions() != null && options.getScheduleDeletionOptions().getTimeToExpire() != null) {
expiresOnString = Long.toString(options.getScheduleDeletionOptions().getTimeToExpire().toMillis());
}
String finalExpiresOnString = expiresOnString;
Long leaseDuration = options.getLeaseDuration() != null ? Long.valueOf(options.getLeaseDuration()) : null;
Context finalContext = context == null ? Context.NONE : context;
Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
this.dataLakeStorage.getPaths().createWithResponse(null, null, pathResourceType, null, null, null,
finalOptions.getSourceLeaseId(), ModelHelper.buildMetadataString(finalOptions.getMetadata()),
finalOptions.getPermissions(), finalOptions.getUmask(), options.getOwner(), finalOptions.getGroup(),
acl, finalOptions.getProposedLeaseId(), leaseDuration, expiryOptions, finalExpiresOnString,
finalOptions.getEncryptionContext(), finalOptions.getPathHttpHeaders(), lac, mac, null,
customerProvidedKey, finalContext);
ResponseBase<PathsCreateHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout,
DataLakeStorageException.class);
return new SimpleResponse<>(response, new PathInfo(response.getDeserializedHeaders().getETag(),
response.getDeserializedHeaders().getLastModified(),
response.getDeserializedHeaders().isXMsRequestServerEncrypted() != null,
response.getDeserializedHeaders().getXMsEncryptionKeySha256()));
}
/**
* Creates a resource if a path does not exist.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createIfNotExists -->
* <pre>
* PathInfo pathInfo = client.createIfNotExists&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createIfNotExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @return {@link PathInfo} that contains information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo createIfNotExists() {
    // Delegate to the response-returning overload with default options and no timeout/context.
    Response<PathInfo> response = createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null, null);
    return response.getValue();
}
/**
* Creates a resource if a path does not exist.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createIfNotExistsWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* String permissions = "permissions";
* String umask = "umask";
* Map<String, String> metadata = Collections.singletonMap&
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setPathHttpHeaders&
* .setMetadata&
*
* Response<PathInfo> response = client.createIfNotExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createIfNotExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathCreateOptions} specifying properties to set on the created resource. If there
* is leading or trailing whitespace in any metadata key or value, it must be removed or encoded.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} whose value contains a
* {@link PathInfo} with information about the resource. If {@link Response}'s status code is 201, a new
* resource was successfully created. If status code is 409, a resource already existed at this location.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createIfNotExistsWithResponse(DataLakePathCreateOptions options, Duration timeout,
    Context context) {
    try {
        return createWithResponse(options, timeout, context);
    } catch (DataLakeStorageException e) {
        // 409 + RESOURCE_ALREADY_EXISTS means the path is already there; report that as a successful,
        // empty-bodied response instead of an exception. Compare constant-first so a missing error code
        // (null getErrorCode()) cannot throw a NullPointerException.
        if (e.getStatusCode() == 409
            && BlobErrorCode.RESOURCE_ALREADY_EXISTS.toString().equals(e.getErrorCode())) {
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), null);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    } catch (RuntimeException e) {
        // Any other failure is logged and rethrown unchanged.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Deletes paths under the resource if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.deleteIfExists -->
* <pre>
* client.create&
* boolean result = client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if the resource is successfully deleted, {@code false} if resource does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Delegate to the response-returning overload with default options and no timeout/context.
    Response<Boolean> response = deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, null);
    return response.getValue();
}
/**
* Deletes all paths under the specified resource if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
*
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the resource
* was successfully deleted. If status code is 404, the resource does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    options = options == null ? new DataLakePathDeleteOptions() : options;
    try {
        Response<Void> response = this.deleteWithResponse(options.getIsRecursive(), options.getRequestConditions(),
            timeout, context);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), true);
    } catch (DataLakeStorageException e) {
        // 404 + RESOURCE_NOT_FOUND means there was nothing to delete; report `false` instead of throwing.
        // Compare constant-first so a missing error code (null getErrorCode()) cannot throw an NPE.
        if (e.getStatusCode() == 404
            && BlobErrorCode.RESOURCE_NOT_FOUND.toString().equals(e.getErrorCode())) {
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    }
}
/**
* Package-private delete method for use by {@link DataLakeFileClient} and {@link DataLakeDirectoryClient}
*
* @param recursive Whether to delete all paths beneath the directory.
* @param requestConditions {@link DataLakeRequestConditions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} containing status code and HTTP headers
*/
Response<Void> deleteWithResponse(Boolean recursive, DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Normalize optional arguments.
    DataLakeRequestConditions conditions =
        (requestConditions == null) ? new DataLakeRequestConditions() : requestConditions;
    Context ctx = (context == null) ? Context.NONE : context;
    LeaseAccessConditions leaseConditions = new LeaseAccessConditions().setLeaseId(conditions.getLeaseId());
    ModifiedAccessConditions modifiedConditions = new ModifiedAccessConditions()
        .setIfMatch(conditions.getIfMatch())
        .setIfNoneMatch(conditions.getIfNoneMatch())
        .setIfModifiedSince(conditions.getIfModifiedSince())
        .setIfUnmodifiedSince(conditions.getIfUnmodifiedSince());
    // Paginated deletion applies only to recursive deletes on service version 2023-08-03 or newer
    // when authenticated with a token credential; otherwise the query parameter is omitted (null).
    boolean usePagination = getServiceVersion().ordinal() >= DataLakeServiceVersion.V2023_08_03.ordinal()
        && Boolean.TRUE.equals(recursive)
        && isTokenCredentialAuthenticated();
    Boolean paginated = usePagination ? Boolean.TRUE : null;
    Callable<ResponseBase<PathsDeleteHeaders, Void>> operation = () -> {
        // Keep issuing delete calls until the service stops returning a continuation token.
        ResponseBase<PathsDeleteHeaders, Void> lastResponse = this.dataLakeStorage.getPaths()
            .deleteWithResponse(null, null, recursive, null, paginated, leaseConditions, modifiedConditions, ctx);
        String token = lastResponse.getHeaders().getValue(Transforms.X_MS_CONTINUATION);
        while (token != null && !token.isEmpty()) {
            lastResponse = this.dataLakeStorage.getPaths()
                .deleteWithResponse(null, null, recursive, token, paginated, leaseConditions, modifiedConditions,
                    ctx);
            token = lastResponse.getHeaders().getValue(Transforms.X_MS_CONTINUATION);
        }
        return lastResponse;
    };
    ResponseBase<PathsDeleteHeaders, Void> response =
        StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
    return new SimpleResponse<>(response, null);
}
/**
* Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
* values must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setMetadata
* <pre>
* client.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate to the response-returning overload with no request conditions and no timeout.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
* values must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setMetadata
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* client.setMetadataWithResponse&
* new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Translate the DataLake request conditions to blob conditions and delegate to the wrapped
    // blob client, converting any blob-layer exception into its DataLake equivalent.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        return blockBlobClient.setMetadataWithResponse(metadata,
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
    }, LOGGER);
}
/**
* Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
* In order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setHttpHeaders
* <pre>
* client.setHttpHeaders&
* .setContentLanguage&
* .setContentType&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link PathHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(PathHttpHeaders headers) {
    // Delegate to the response-returning overload with no request conditions and no timeout.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
* In order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setHttpHeadersWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<Void> response = client.setHttpHeadersWithResponse&
* .setContentLanguage&
* .setContentType&
* System.out.printf&
* response.getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link PathHttpHeaders}
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(PathHttpHeaders headers,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Translate the DataLake headers/conditions to their blob equivalents and delegate to the
    // wrapped blob client, converting any blob-layer exception into its DataLake equivalent.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        return blockBlobClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
    }, LOGGER);
}
/**
* Changes the access control list, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlList
* <pre>
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* String group = "group";
* String owner = "owner";
*
* System.out.printf&
* .getLastModified&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlList
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList A list of {@link PathAccessControlEntry} objects.
* @param group The group of the resource.
* @param owner The owner of the resource.
* @return The resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setAccessControlList(List<PathAccessControlEntry> accessControlList, String group, String owner) {
    // Delegate to the response-returning overload with no request conditions and no timeout.
    Response<PathInfo> response =
        setAccessControlListWithResponse(accessControlList, group, owner, null, null, Context.NONE);
    return response.getValue();
}
/**
* Changes the access control list, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlListWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* String group = "group";
* String owner = "owner";
*
* Response<PathInfo> response = client.setAccessControlListWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlListWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList A list of {@link PathAccessControlEntry} objects.
* @param group The group of the resource.
* @param owner The owner of the resource.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setAccessControlListWithResponse(List<PathAccessControlEntry> accessControlList,
    String group, String owner, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // ACL and POSIX permissions are mutually exclusive; this overload supplies the ACL (permissions == null).
    return setAccessControlWithResponse(accessControlList, null, group, owner, requestConditions, timeout, context);
}
Response<PathInfo> setAccessControlWithResponse(List<PathAccessControlEntry> accessControlList,
    PathPermissions permissions, String group, String owner, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Normalize optional arguments.
    DataLakeRequestConditions conditions =
        (requestConditions == null) ? new DataLakeRequestConditions() : requestConditions;
    Context ctx = (context == null) ? Context.NONE : context;
    LeaseAccessConditions leaseConditions = new LeaseAccessConditions().setLeaseId(conditions.getLeaseId());
    ModifiedAccessConditions modifiedConditions = new ModifiedAccessConditions()
        .setIfMatch(conditions.getIfMatch())
        .setIfNoneMatch(conditions.getIfNoneMatch())
        .setIfModifiedSince(conditions.getIfModifiedSince())
        .setIfUnmodifiedSince(conditions.getIfUnmodifiedSince());
    // Callers pass either POSIX permissions or an ACL; serialize whichever one is present.
    String permissionsString = (permissions == null) ? null : permissions.toString();
    String aclString =
        (accessControlList == null) ? null : PathAccessControlEntry.serializeList(accessControlList);
    Callable<ResponseBase<PathsSetAccessControlHeaders, Void>> operation = () -> this.dataLakeStorage.getPaths()
        .setAccessControlWithResponse(null, owner, group, permissionsString, aclString, null, leaseConditions,
            modifiedConditions, ctx);
    ResponseBase<PathsSetAccessControlHeaders, Void> response =
        StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
    PathsSetAccessControlHeaders headers = response.getDeserializedHeaders();
    return new SimpleResponse<>(response, new PathInfo(headers.getETag(), headers.getLastModified()));
}
/**
* Changes the permissions, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setPermissions
* <pre>
* PathPermissions permissions = new PathPermissions&
* .setGroup&
* .setOwner&
* .setOther&
* String group = "group";
* String owner = "owner";
*
* System.out.printf&
* .getLastModified&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setPermissions
*
* <p>For more information, see the
* <a href="https:
*
* @param permissions {@link PathPermissions}
* @param group The group of the resource.
* @param owner The owner of the resource.
* @return The resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setPermissions(PathPermissions permissions, String group, String owner) {
    // Delegate to the response-returning overload with no request conditions and no timeout.
    Response<PathInfo> response = setPermissionsWithResponse(permissions, group, owner, null, null, Context.NONE);
    return response.getValue();
}
/**
* Changes the permissions, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setPermissionsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathPermissions permissions = new PathPermissions&
* .setGroup&
* .setOwner&
* .setOther&
* String group = "group";
* String owner = "owner";
*
* Response<PathInfo> response = client.setPermissionsWithResponse&
* timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setPermissionsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param permissions {@link PathPermissions}
* @param group The group of the resource.
* @param owner The owner of the resource.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setPermissionsWithResponse(PathPermissions permissions, String group, String owner,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // ACL and POSIX permissions are mutually exclusive; this overload supplies permissions (ACL == null).
    return setAccessControlWithResponse(null, permissions, group, owner, requestConditions, timeout, context);
}
/**
* Recursively sets the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursive
* <pre>
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.setAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult setAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    // Delegate to the response-returning overload with default options and no timeout.
    PathSetAccessControlRecursiveOptions options = new PathSetAccessControlRecursiveOptions(accessControlList);
    return setAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively sets the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathSetAccessControlRecursiveOptions options =
* new PathSetAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.setAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathSetAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
    PathSetAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the ACL once and delegate to the shared recursive implementation in SET mode.
    String serializedAcl = PathAccessControlEntry.serializeList(options.getAccessControlList());
    return setAccessControlRecursiveWithResponse(serializedAcl, options.getProgressHandler(),
        PathSetAccessControlRecursiveMode.SET, options.getBatchSize(), options.getMaxBatches(),
        options.isContinueOnFailure(), options.getContinuationToken(), timeout, context);
}
Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
    String accessControlList, Consumer<Response<AccessControlChanges>> progressHandler,
    PathSetAccessControlRecursiveMode mode, Integer batchSize, Integer maxBatches, Boolean continueOnFailure,
    String continuationToken, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("accessControlList", accessControlList);
    Context ctx = (context == null) ? Context.NONE : context;
    // Aggregate counters accumulated across every batch of the recursive operation.
    AtomicInteger directoriesChanged = new AtomicInteger(0);
    AtomicInteger filesChanged = new AtomicInteger(0);
    AtomicInteger failures = new AtomicInteger(0);
    AtomicInteger batches = new AtomicInteger(0);
    try {
        // Issue the first batch; the helper recurses while the service keeps returning continuation tokens.
        Callable<ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse>> operation =
            () -> this.dataLakeStorage.getPaths().setAccessControlRecursiveWithResponse(mode, null,
                continuationToken, continueOnFailure, batchSize, accessControlList, null, ctx);
        ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response =
            StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
        return setAccessControlRecursiveWithResponseHelper(response, maxBatches, directoriesChanged, filesChanged,
            failures, batches, progressHandler, accessControlList, mode, batchSize, continueOnFailure,
            continuationToken, null, timeout, ctx);
    } catch (DataLakeStorageException e) {
        // Wrap storage failures with the continuation token so the caller can resume the operation.
        throw LOGGER.logExceptionAsError(ModelHelper.changeAclRequestFailed(e, continuationToken));
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(ModelHelper.changeAclFailed(e, continuationToken));
    }
}
/**
 * Processes one batch response of a recursive ACL change and, while the service keeps returning a continuation
 * token and {@code maxBatches} has not been reached, synchronously issues the next batch and recurses on it.
 * The {@code AtomicInteger} parameters are running aggregates shared across the whole recursive operation.
 * NOTE(review): recursion depth equals the number of batches; presumably bounded by maxBatches/service paging —
 * confirm for very large trees.
 */
Response<AccessControlChangeResult> setAccessControlRecursiveWithResponseHelper(
    ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response,
    Integer maxBatches, AtomicInteger directoriesSuccessfulCount, AtomicInteger filesSuccessfulCount,
    AtomicInteger failureCount, AtomicInteger batchesCount,
    Consumer<Response<AccessControlChanges>> progressHandler, String accessControlStr,
    PathSetAccessControlRecursiveMode mode, Integer batchSize, Boolean continueOnFailure, String lastToken,
    List<AccessControlChangeFailure> batchFailures, Duration timeout, Context context) {
    // Fold this batch's results into the running aggregates.
    batchesCount.incrementAndGet();
    directoriesSuccessfulCount.addAndGet(response.getValue().getDirectoriesSuccessful());
    filesSuccessfulCount.addAndGet(response.getValue().getFilesSuccessful());
    failureCount.addAndGet(response.getValue().getFailureCount());
    // Capture the first batch of failures encountered; later batches do not overwrite it.
    if (failureCount.get() > 0 && batchFailures == null) {
        batchFailures = response.getValue().getFailedEntries()
            .stream()
            .map(aclFailedEntry -> new AccessControlChangeFailure()
                .setDirectory(aclFailedEntry.getType().equals("DIRECTORY"))
                .setName(aclFailedEntry.getName())
                .setErrorMessage(aclFailedEntry.getErrorMessage())
            ).collect(Collectors.toList());
    }
    // Effectively-final copy for use in the recursive call below.
    List<AccessControlChangeFailure> finalBatchFailures = batchFailures;
    /*
    Determine which token we should report/return/use next.
    If there was a token present on the response (still processing and either no errors or forceFlag set),
    use that one.
    If there were no failures or force flag set and still nothing present, we are at the end, so use that.
    If there were failures and no force flag set, use the last token (no token is returned in this case).
    */
    String newToken = response.getDeserializedHeaders().getXMsContinuation();
    String effectiveNextToken;
    if (newToken != null && !newToken.isEmpty()) {
        effectiveNextToken = newToken;
    } else {
        if (failureCount.get() == 0 || (continueOnFailure == null || continueOnFailure)) {
            effectiveNextToken = newToken;
        } else {
            effectiveNextToken = lastToken;
        }
    }
    // Report this batch's changes plus the running aggregates to the caller-supplied handler, if any.
    if (progressHandler != null) {
        AccessControlChanges changes = new AccessControlChanges();
        changes.setContinuationToken(effectiveNextToken);
        changes.setBatchFailures(
            response.getValue().getFailedEntries()
                .stream()
                .map(aclFailedEntry -> new AccessControlChangeFailure()
                    .setDirectory(aclFailedEntry.getType().equals("DIRECTORY"))
                    .setName(aclFailedEntry.getName())
                    .setErrorMessage(aclFailedEntry.getErrorMessage())
                ).collect(Collectors.toList())
        );
        changes.setBatchCounters(new AccessControlChangeCounters()
            .setChangedDirectoriesCount(response.getValue().getDirectoriesSuccessful())
            .setChangedFilesCount(response.getValue().getFilesSuccessful())
            .setFailedChangesCount(response.getValue().getFailureCount()));
        changes.setAggregateCounters(new AccessControlChangeCounters()
            .setChangedDirectoriesCount(directoriesSuccessfulCount.get())
            .setChangedFilesCount(filesSuccessfulCount.get())
            .setFailedChangesCount(failureCount.get()));
        progressHandler.accept(
            new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), changes,
                response.getDeserializedHeaders()));
    }
    /*
    Determine if we are finished either because there is no new continuation (failure or finished) token or we have
    hit maxBatches.
    */
    if ((newToken == null || newToken.isEmpty()) || (maxBatches != null && batchesCount.get() >= maxBatches)) {
        // Terminal case: package the aggregate counters, first failures, and resume token into the result.
        AccessControlChangeResult result = new AccessControlChangeResult()
            .setBatchFailures(batchFailures)
            .setContinuationToken(effectiveNextToken)
            .setCounters(new AccessControlChangeCounters()
                .setChangedDirectoriesCount(directoriesSuccessfulCount.get())
                .setChangedFilesCount(filesSuccessfulCount.get())
                .setFailedChangesCount(failureCount.get()));
        return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            result, response.getDeserializedHeaders());
    }
    // Not finished: synchronously issue the next batch and recurse on its response.
    try {
        Callable<ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse>> operation =
            () -> this.dataLakeStorage.getPaths().setAccessControlRecursiveWithResponse(mode, null, effectiveNextToken,
                continueOnFailure, batchSize, accessControlStr, null, context);
        ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response2 =
            StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
        return setAccessControlRecursiveWithResponseHelper(response2, maxBatches,
            directoriesSuccessfulCount, filesSuccessfulCount, failureCount, batchesCount, progressHandler,
            accessControlStr, mode, batchSize, continueOnFailure, effectiveNextToken, finalBatchFailures, timeout, context);
    } catch (Exception e) {
        // Surface failures together with the token needed to resume the operation where it stopped.
        if (e instanceof DataLakeStorageException) {
            throw LOGGER.logExceptionAsError(ModelHelper.changeAclRequestFailed((DataLakeStorageException) e,
                effectiveNextToken));
        } else {
            throw LOGGER.logExceptionAsError(ModelHelper.changeAclFailed(e, effectiveNextToken));
        }
    }
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
* <pre>
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.updateAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult updateAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    // Delegate to the response-returning overload with default options and no timeout.
    PathUpdateAccessControlRecursiveOptions options =
        new PathUpdateAccessControlRecursiveOptions(accessControlList);
    return updateAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathUpdateAccessControlRecursiveOptions options =
* new PathUpdateAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.updateAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathUpdateAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> updateAccessControlRecursiveWithResponse(
    PathUpdateAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the requested ACL once, then delegate to the shared recursive-change
    // implementation in MODIFY mode, forwarding all batching/continuation settings.
    String serializedAcl = PathAccessControlEntry.serializeList(options.getAccessControlList());
    return setAccessControlRecursiveWithResponse(serializedAcl, options.getProgressHandler(),
        PathSetAccessControlRecursiveMode.MODIFY, options.getBatchSize(), options.getMaxBatches(),
        options.isContinueOnFailure(), options.getContinuationToken(), timeout, context);
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
* <pre>
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.removeAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult removeAccessControlRecursive(
    List<PathRemoveAccessControlEntry> accessControlList) {
    // Wrap the raw entry list in the options type, reuse the WithResponse overload with
    // defaults (no timeout, empty context), and discard the HTTP-level response details.
    PathRemoveAccessControlRecursiveOptions options =
        new PathRemoveAccessControlRecursiveOptions(accessControlList);
    return removeAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathRemoveAccessControlRecursiveOptions options =
* new PathRemoveAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.removeAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathRemoveAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> removeAccessControlRecursiveWithResponse(
    PathRemoveAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the entries to remove and delegate to the shared recursive-change
    // implementation in REMOVE mode, forwarding all batching/continuation settings.
    String serializedAcl =
        PathRemoveAccessControlEntry.serializeList(options.getAccessControlList());
    return setAccessControlRecursiveWithResponse(serializedAcl, options.getProgressHandler(),
        PathSetAccessControlRecursiveMode.REMOVE, options.getBatchSize(), options.getMaxBatches(),
        options.isContinueOnFailure(), options.getContinuationToken(), timeout, context);
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
* <pre>
* PathAccessControl response = client.getAccessControl&
* System.out.printf&
* PathAccessControlEntry.serializeList&
* response.getOwner&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathAccessControl getAccessControl() {
    // Defaults: no UPN resolution, no request conditions, no timeout, empty context.
    Response<PathAccessControl> response =
        getAccessControlWithResponse(false, null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse -->
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* boolean userPrincipalNameReturned = false;
*
* Response<PathAccessControl> response = client.getAccessControlWithResponse&
* requestConditions, timeout, new Context&
*
* PathAccessControl pac = response.getValue&
*
* System.out.printf&
* PathAccessControlEntry.serializeList&
* pac.getPermissions&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse -->
*
* <p>For more information, see the
* <a href="https:
*
* @param userPrincipalNameReturned When true, user identity values returned as User Principal Names. When false,
* user identity values returned as Azure Active Directory Object IDs. Default value is false.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathAccessControl> getAccessControlWithResponse(boolean userPrincipalNameReturned,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Normalize optional arguments before building the wire-level access conditions.
    DataLakeRequestConditions conditions =
        requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    Context finalContext = context == null ? Context.NONE : context;
    LeaseAccessConditions leaseConditions =
        new LeaseAccessConditions().setLeaseId(conditions.getLeaseId());
    ModifiedAccessConditions modifiedConditions = new ModifiedAccessConditions()
        .setIfMatch(conditions.getIfMatch())
        .setIfNoneMatch(conditions.getIfNoneMatch())
        .setIfModifiedSince(conditions.getIfModifiedSince())
        .setIfUnmodifiedSince(conditions.getIfUnmodifiedSince());
    // The service exposes ACL retrieval through getProperties with the GET_ACCESS_CONTROL action.
    Callable<ResponseBase<PathsGetPropertiesHeaders, Void>> operation =
        () -> this.dataLakeStorage.getPaths().getPropertiesWithResponse(null, null,
            PathGetPropertiesAction.GET_ACCESS_CONTROL, userPrincipalNameReturned, leaseConditions,
            modifiedConditions, finalContext);
    ResponseBase<PathsGetPropertiesHeaders, Void> rawResponse =
        StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
    // Translate the response headers into the strongly-typed access-control model.
    PathsGetPropertiesHeaders headers = rawResponse.getDeserializedHeaders();
    PathAccessControl accessControl = new PathAccessControl(
        PathAccessControlEntry.parseList(headers.getXMsAcl()),
        PathPermissions.parseSymbolic(headers.getXMsPermissions()),
        headers.getXMsGroup(), headers.getXMsOwner());
    return new SimpleResponse<>(rawResponse, accessControl);
}
Response<DataLakePathClient> renameWithResponseWithTimeout(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    // Renames this path by issuing a create call against the destination with the "rename source"
    // pointing at this path (LEGACY rename mode), and returns a client for the new location.
    Context finalContext = context == null ? Context.NONE : context;
    destinationRequestConditions = destinationRequestConditions == null ? new DataLakeRequestConditions()
        : destinationRequestConditions;
    DataLakeRequestConditions finalSourceRequestConditions = sourceRequestConditions == null ? new DataLakeRequestConditions()
        : sourceRequestConditions;
    // Source-side preconditions are sent as dedicated source access conditions.
    SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions()
        .setSourceIfModifiedSince(finalSourceRequestConditions.getIfModifiedSince())
        .setSourceIfUnmodifiedSince(finalSourceRequestConditions.getIfUnmodifiedSince())
        .setSourceIfMatch(finalSourceRequestConditions.getIfMatch())
        .setSourceIfNoneMatch(finalSourceRequestConditions.getIfNoneMatch());
    // Destination-side preconditions use the regular lease/modified access conditions.
    LeaseAccessConditions destLac = new LeaseAccessConditions()
        .setLeaseId(destinationRequestConditions.getLeaseId());
    ModifiedAccessConditions destMac = new ModifiedAccessConditions()
        .setIfMatch(destinationRequestConditions.getIfMatch())
        .setIfNoneMatch(destinationRequestConditions.getIfNoneMatch())
        .setIfModifiedSince(destinationRequestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(destinationRequestConditions.getIfUnmodifiedSince());
    DataLakePathClient dataLakePathClient = getPathClient(destinationFileSystem, destinationPath);
    // The rename source is the absolute, URL-encoded path of this resource within the account.
    String renameSource = "/" + this.getFileSystemName() + "/" + Utility.urlEncode(pathName);
    // When this client authenticates with a SAS, append its signature to the rename source so the
    // service can authorize reading the source; a leading '?' is stripped to avoid doubling it below.
    String signature = null;
    if (this.getSasToken() != null) {
        if (this.getSasToken().getSignature().startsWith("?")) {
            signature = this.getSasToken().getSignature().substring(1);
        } else {
            signature = this.getSasToken().getSignature();
        }
    }
    String finalRenameSource = signature != null ? renameSource + "?" + signature : renameSource;
    // Issue the create-with-rename-source call through the destination client's REST layer.
    Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
        dataLakePathClient.dataLakeStorage.getPaths().createWithResponse(null /* request id */, null /* timeout */,
            null /* pathResourceType */, null /* continuation */, PathRenameMode.LEGACY, finalRenameSource,
            finalSourceRequestConditions.getLeaseId(), null /* properties */, null /* permissions */,
            null /* umask */, null /* owner */, null /* group */, null /* acl */, null /* proposedLeaseId */,
            null /* leaseDuration */, null /* expiryOptions */, null /* expiresOn */,
            null /* encryptionContext */, null /* pathHttpHeaders */, destLac, destMac, sourceConditions,
            null /* cpkInfo */, finalContext);
    ResponseBase<PathsCreateHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout,
        DataLakeStorageException.class);
    return new SimpleResponse<>(response, dataLakePathClient);
}
/**
* Takes in a destination and creates a DataLakePathClient with a new path
* @param destinationFileSystem The destination file system
* @param destinationPath The destination path
* @return A DataLakePathClient
*/
DataLakePathClient getPathClient(String destinationFileSystem, String destinationPath) {
    // Default to this client's file system when no destination file system is given.
    if (destinationFileSystem == null) {
        destinationFileSystem = getFileSystemName();
    }
    if (CoreUtils.isNullOrEmpty(destinationPath)) {
        // Fix: the guard rejects empty strings as well as null, so the message says so.
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'destinationPath' can not be null or empty"));
    }
    // Build a new block blob client targeting the destination path and wrap it in a path client
    // that shares this client's pipeline, credentials and CPK configuration.
    return new DataLakePathClient(dataLakePathAsyncClient,
        ModelHelper.prepareBuilderReplacePath(destinationFileSystem, destinationPath, getFileSystemName(),
            getHttpPipeline(), getServiceVersion(), getPathUrl()).buildBlockBlobClient(),
        getHttpPipeline(), getAccountUrl(), serviceVersion, accountName, destinationFileSystem, destinationPath,
        pathResourceType, sasToken, customerProvidedKey, isTokenCredentialAuthenticated());
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
* <pre>
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties() {
    // Simplest form: no request conditions, no timeout, empty context.
    Response<PathProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties
* <pre>
* PathGetPropertiesOptions options = new PathGetPropertiesOptions&
*
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties(PathGetPropertiesOptions options) {
    // Options-driven variant: delegate and unwrap the HTTP response.
    Response<PathProperties> response =
        getPropertiesUsingOptionsWithResponse(options, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<PathProperties> response = client.getPropertiesWithResponse&
* new Context&
*
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> getPropertiesWithResponse(DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the blob endpoint, then map blob models and exceptions back into
    // their Data Lake equivalents.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> blobResponse = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
        PathProperties pathProperties =
            Transforms.toPathProperties(blobResponse.getValue(), blobResponse);
        return new SimpleResponse<>(blobResponse, pathProperties);
    }, LOGGER);
}
/**
* Returns the resource's metadata and properties.
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Response<PathProperties> getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options, Duration timeout,
    Context context) {
    // Propagate the caller's UPN preference as a request header. 'options' may be null here
    // (the UPN supplier already null-checks it), so the request conditions must be
    // null-guarded too — previously this dereferenced options unconditionally and threw NPE.
    Context finalContext = BuilderHelper.addUpnHeader(
        () -> (options == null) ? null : options.isUserPrincipalName(), context);
    DataLakeRequestConditions requestConditions =
        (options == null) ? null : options.getRequestConditions();
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(requestConditions), timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.exists -->
*
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Convenience form: no timeout, empty context; unwrap the response value.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // The blob endpoint answers the existence probe; returnOrConvertException remaps
    // blob-side exceptions into their Data Lake equivalents.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.existsWithResponse(timeout, context), LOGGER);
}
/**
 * Gets the {@link BlockBlobClient} this path client delegates blob-endpoint operations to.
 *
 * @return the underlying block blob client.
 */
BlockBlobClient getBlockBlobClient() {
    return blockBlobClient;
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Convenience overload: uses this client's account name and an empty context.
    return generateUserDelegationSas(dataLakeServiceSasSignatureValues, userDelegationKey, getAccountName(),
        Context.NONE);
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // The SAS helper needs to know whether this path is a directory.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasImplUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasImplUtil.generateUserDelegationSas(userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues) {
    // Convenience overload: generate the SAS with an empty context.
    return generateSas(dataLakeServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* &
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues, Context context) {
    // The SAS helper needs to know whether this path is a directory; signing uses the
    // shared-key credential extracted from this client's pipeline.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasImplUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasImplUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} | class DataLakePathClient {
private static final ClientLogger LOGGER = new ClientLogger(DataLakePathClient.class);
final DataLakePathAsyncClient dataLakePathAsyncClient;
// Blob-endpoint client used for operations this class delegates to Blob storage
// (properties, existence checks, etc.).
final BlockBlobClient blockBlobClient;
// Generated REST client bound to this client's primary endpoint URL and path.
final AzureDataLakeStorageRestAPIImpl dataLakeStorage;
// Generated REST client scoped to the file system only (no path component).
final AzureDataLakeStorageRestAPIImpl fileSystemDataLakeStorage;
// Generated REST client bound to the derived blob endpoint for this path.
final AzureDataLakeStorageRestAPIImpl blobDataLakeStorage;
private final String accountName;
private final String fileSystemName;
// Full path of the resource within the file system.
final String pathName;
// Final segment of pathName (the resource's own name), computed in the constructor.
private final String objectName;
private final DataLakeServiceVersion serviceVersion;
// Customer-provided encryption key info; null when no CPK is configured.
private final CpkInfo customerProvidedKey;
// Whether this client addresses a file or a directory.
final PathResourceType pathResourceType;
// SAS credential used for authorization, if any; null otherwise.
private final AzureSasCredential sasToken;
private final boolean isTokenCredentialAuthenticated;
// Package-private constructor: instances are wired up by the builders/factories, not end users.
DataLakePathClient(DataLakePathAsyncClient dataLakePathAsyncClient, BlockBlobClient blockBlobClient,
    HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName,
    String fileSystemName, String pathName, PathResourceType pathResourceType, AzureSasCredential sasToken,
    CpkInfo customerProvidedKey, boolean isTokenCredentialAuthenticated) {
    this.dataLakePathAsyncClient = dataLakePathAsyncClient;
    this.blockBlobClient = blockBlobClient;
    this.accountName = accountName;
    this.fileSystemName = fileSystemName;
    this.pathName = pathName;
    this.pathResourceType = pathResourceType;
    this.sasToken = sasToken;
    // REST client addressed at the supplied endpoint URL for this specific path.
    this.dataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(url)
        .fileSystem(fileSystemName)
        .path(this.pathName)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.serviceVersion = serviceVersion;
    // Derive the blob endpoint from the supplied URL so blob-only operations can also be issued.
    String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(url, "blob", "dfs");
    this.blobDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(blobUrl)
        .fileSystem(fileSystemName)
        .path(this.pathName)
        .version(serviceVersion.getVersion())
        .buildClient();
    // File-system-scoped REST client (no path component).
    this.fileSystemDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(url)
        .fileSystem(fileSystemName)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.customerProvidedKey = customerProvidedKey;
    this.isTokenCredentialAuthenticated = isTokenCredentialAuthenticated;
    // Cache the last path segment as this object's simple name.
    String[] pathParts = pathName.split("/");
    this.objectName = pathParts[pathParts.length - 1];
}
/**
* Gets the URL of the storage account.
*
* @return the URL.
*/
String getAccountUrl() {
    // Endpoint root as configured on the generated REST client.
    return dataLakeStorage.getUrl();
}
/**
* Gets the URL of the object represented by this client on the Data Lake service.
*
* @return the URL.
*/
String getPathUrl() {
    // Account URL + "/{fileSystem}/{url-encoded path}".
    return dataLakeStorage.getUrl() + "/" + fileSystemName + "/" + Utility.urlEncode(pathName);
}
/**
* Gets the associated account name.
*
* @return Account name associated with this storage resource.
*/
public String getAccountName() {
    // Captured at construction; not derived from the endpoint URL.
    return accountName;
}
/**
* Gets the name of the File System in which this object lives.
*
* @return The name of the File System.
*/
public String getFileSystemName() {
    // Captured at construction.
    return fileSystemName;
}
/**
* Gets the path of this object, not including the name of the resource itself.
*
* @return The path of the object.
*/
String getObjectPath() {
    // Full path within the file system, excluding the file system name itself.
    return pathName;
}
/**
 * Gets the name of this object, not including its full path.
 *
 * @return The name of the object.
 */
String getObjectName() {
    // Accessor for the cached final path segment (computed in the constructor); the
    // Javadoc above previously had no method attached and the field had no getter.
    return objectName;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return dataLakeStorage.getHttpPipeline();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public DataLakeServiceVersion getServiceVersion() {
    // Captured at construction; also used to version the generated REST clients.
    return serviceVersion;
}
/**
 * Gets the SAS credential used by this client, if any.
 *
 * @return the SAS credential, or {@code null} when none is configured.
 */
AzureSasCredential getSasToken() {
    return this.sasToken;
}
/**
* Gets the {@link CpkInfo} used to encrypt this path's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CustomerProvidedKey getCustomerProvidedKey() {
    // Guard against NPE: the field is null when no customer-provided key is configured
    // (getCustomerProvidedKeyClient explicitly allows a null key).
    if (customerProvidedKey == null) {
        return null;
    }
    return new CustomerProvidedKey(customerProvidedKey.getEncryptionKey());
}
// Internal accessor for the raw CPK wire model; may return null when no CPK is configured.
CpkInfo getCpkInfo() {
    return this.customerProvidedKey;
}
// Whether this client was built with a token credential; captured at construction.
boolean isTokenCredentialAuthenticated() {
    return this.isTokenCredentialAuthenticated;
}
/**
* Creates a new {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the path,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
*/
public DataLakePathClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Translate the public CPK model into the wire-level CpkInfo; null means "no CPK".
    CpkInfo cpkInfo = null;
    if (customerProvidedKey != null) {
        cpkInfo = new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    }
    // Re-wrap the underlying blob client with the same CPK, keeping every other setting.
    BlockBlobClient cpkBlobClient = blockBlobClient.getCustomerProvidedKeyClient(
        Transforms.toBlobCustomerProvidedKey(customerProvidedKey));
    return new DataLakePathClient(dataLakePathAsyncClient, cpkBlobClient, getHttpPipeline(),
        getAccountUrl(), getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath(),
        this.pathResourceType, getSasToken(), cpkInfo, isTokenCredentialAuthenticated());
}
/**
* Creates a resource. By default, this method will not overwrite an existing path.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.create -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.create -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create() {
    // Non-destructive by default: delegate with overwrite disabled.
    return create(false);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.create
* <pre>
* boolean overwrite = true;
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.create
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param overwrite Whether to overwrite, should data exist on the path.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create(boolean overwrite) {
    // When overwrite is disallowed, require that no path exists by matching against
    // the ETag wildcard (If-None-Match: *).
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
    if (!overwrite) {
        requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    DataLakePathCreateOptions options =
        new DataLakePathCreateOptions().setRequestConditions(requestConditions);
    return createWithResponse(options, null, Context.NONE).getValue();
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* String permissions = "permissions";
* String umask = "umask";
*
* Response<PathInfo> response = client.createWithResponse&
* Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param permissions POSIX access permissions for the resource owner, the resource owning group, and others.
* @param umask Restricts permissions of the resource to be created.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createWithResponse(String permissions, String umask, PathHttpHeaders headers,
    Map<String, String> metadata, DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fold the individual parameters into the options bag and reuse the options-based overload.
    return createWithResponse(
        new DataLakePathCreateOptions()
            .setPermissions(permissions)
            .setUmask(umask)
            .setPathHttpHeaders(headers)
            .setMetadata(metadata)
            .setRequestConditions(requestConditions),
        timeout, context);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Map<String, String> metadata = Collections.singletonMap&
* String permissions = "permissions";
* String umask = "umask";
* String owner = "rwx";
* String group = "r--";
* String leaseId = CoreUtils.randomUuid&
* Integer duration = 15;
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setOwner&
* .setGroup&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setMetadata&
* .setProposedLeaseId&
* .setLeaseDuration&
*
* Response<PathInfo> response = client.createWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathCreateOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createWithResponse(DataLakePathCreateOptions options, Duration timeout, Context context) {
// Normalize null inputs so the remainder of the method can assume non-null options/conditions.
DataLakePathCreateOptions finalOptions = options == null ? new DataLakePathCreateOptions() : options;
DataLakeRequestConditions requestConditions = finalOptions.getRequestConditions() == null
? new DataLakeRequestConditions() : finalOptions.getRequestConditions();
// Split the combined request conditions into the lease and modified-access wire models.
LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
ModifiedAccessConditions mac = new ModifiedAccessConditions()
.setIfMatch(requestConditions.getIfMatch())
.setIfNoneMatch(requestConditions.getIfNoneMatch())
.setIfModifiedSince(requestConditions.getIfModifiedSince())
.setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
// The ACL is transmitted as a single serialized string; null means "do not set".
String acl = finalOptions.getAccessControlList() != null ? PathAccessControlEntry
.serializeList(finalOptions.getAccessControlList()) : null;
PathExpiryOptions expiryOptions = ModelHelper.setFieldsIfNull(finalOptions, pathResourceType);
// Expiry may be given either as an absolute time (sent as RFC 1123) or as a time-to-expire
// (sent as milliseconds); the absolute time takes precedence when both are set.
String expiresOnString = null;
if (finalOptions.getScheduleDeletionOptions() != null && finalOptions.getScheduleDeletionOptions().getExpiresOn() != null) {
expiresOnString = DateTimeRfc1123.toRfc1123String(finalOptions.getScheduleDeletionOptions().getExpiresOn());
} else if (finalOptions.getScheduleDeletionOptions() != null && finalOptions.getScheduleDeletionOptions().getTimeToExpire() != null) {
expiresOnString = Long.toString(finalOptions.getScheduleDeletionOptions().getTimeToExpire().toMillis());
}
// Copy into an effectively-final local so the lambda below can capture it.
String finalExpiresOnString = expiresOnString;
Long leaseDuration = finalOptions.getLeaseDuration() != null ? Long.valueOf(finalOptions.getLeaseDuration()) : null;
Context finalContext = context == null ? Context.NONE : context;
Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
this.dataLakeStorage.getPaths().createWithResponse(null, null, pathResourceType, null, null, null,
finalOptions.getSourceLeaseId(), ModelHelper.buildMetadataString(finalOptions.getMetadata()),
finalOptions.getPermissions(), finalOptions.getUmask(), finalOptions.getOwner(),
finalOptions.getGroup(), acl, finalOptions.getProposedLeaseId(), leaseDuration, expiryOptions,
finalExpiresOnString, finalOptions.getEncryptionContext(), finalOptions.getPathHttpHeaders(), lac, mac,
null, customerProvidedKey, finalContext);
// sendRequest applies the optional timeout and maps failures to DataLakeStorageException.
ResponseBase<PathsCreateHeaders, Void> response = sendRequest(operation, timeout,
DataLakeStorageException.class);
// NOTE(review): the third PathInfo argument treats mere PRESENCE of the
// x-ms-request-server-encrypted header as "server encrypted", regardless of its value —
// confirm the service only returns this header when it is true.
return new SimpleResponse<>(response, new PathInfo(
response.getDeserializedHeaders().getETag(),
response.getDeserializedHeaders().getLastModified(),
response.getDeserializedHeaders().isXMsRequestServerEncrypted() != null,
response.getDeserializedHeaders().getXMsEncryptionKeySha256()));
}
/**
* Creates a resource if a path does not exist.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createIfNotExists -->
* <pre>
* PathInfo pathInfo = client.createIfNotExists&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createIfNotExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @return {@link PathInfo} that contains information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo createIfNotExists() {
// Delegates with default options; when the path already exists (409) the underlying overload
// returns a response whose value is null, so this method returns null in that case.
return createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null, null).getValue();
}
/**
* Creates a resource if a path does not exist.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createIfNotExistsWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* String permissions = "permissions";
* String umask = "umask";
* Map<String, String> metadata = Collections.singletonMap&
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setPathHttpHeaders&
* .setMetadata&
*
* Response<PathInfo> response = client.createIfNotExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createIfNotExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
 * @param options {@link DataLakePathCreateOptions} specifying the metadata, permissions, umask, and headers to
 * apply. If there is leading or trailing whitespace in any metadata key or value, it must be removed or encoded.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} signaling completion, whose {@link Response#getValue() value} contains a
 * {@link PathInfo} containing information about the resource. If {@link Response}'s status code is 201, a new
 * resource was successfully created. If status code is 409, a resource already existed at this location.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createIfNotExistsWithResponse(DataLakePathCreateOptions options, Duration timeout,
Context context) {
try {
options = options == null ? new DataLakePathCreateOptions() : options;
// Force an If-None-Match: * precondition so creation only succeeds when the path is absent.
// NOTE(review): this MUTATES the caller-supplied options instance and silently overwrites any
// request conditions the caller had set on it — confirm this is the intended contract.
options.setRequestConditions(new DataLakeRequestConditions()
.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD));
return createWithResponse(options, timeout, context);
} catch (DataLakeStorageException e) {
// 409 Conflict means the resource already exists: surface the raw response with a null value
// instead of throwing, so callers can distinguish "created" (201) from "already existed" (409).
if (e.getStatusCode() == 409) {
HttpResponse res = e.getResponse();
return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), null);
} else {
throw LOGGER.logExceptionAsError(e);
}
} catch (RuntimeException e) {
// Log-and-rethrow for any other runtime failure (e.g. timeout expiry).
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Deletes paths under the resource if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.deleteIfExists -->
* <pre>
* client.create&
* boolean result = client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if the resource is successfully deleted, {@code false} if resource does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
// Delegates to the response-returning overload; the Boolean value is true when the resource was
// deleted and false when it did not exist (404).
return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, null).getValue();
}
/**
* Deletes all paths under the specified resource if exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
*
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the resource
* was successfully deleted. If status code is 404, the resource does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    // Substitute default options when none were supplied.
    DataLakePathDeleteOptions deleteOptions = options == null ? new DataLakePathDeleteOptions() : options;
    try {
        Response<Void> deleteResponse = this.deleteWithResponse(deleteOptions.getIsRecursive(),
            deleteOptions.getRequestConditions(), timeout, context);
        // Deletion succeeded: mirror the service response with a true value.
        return new SimpleResponse<>(deleteResponse.getRequest(), deleteResponse.getStatusCode(),
            deleteResponse.getHeaders(), true);
    } catch (DataLakeStorageException e) {
        // Anything other than "not found" is a real failure and is logged and rethrown.
        if (e.getStatusCode() != 404) {
            throw LOGGER.logExceptionAsError(e);
        }
        // 404: the resource was absent — report false instead of throwing.
        HttpResponse errorResponse = e.getResponse();
        return new SimpleResponse<>(errorResponse.getRequest(), errorResponse.getStatusCode(),
            errorResponse.getHeaders(), false);
    }
}
/**
* Package-private delete method for use by {@link DataLakeFileClient} and {@link DataLakeDirectoryClient}
*
* @param recursive Whether to delete all paths beneath the directory.
* @param requestConditions {@link DataLakeRequestConditions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} containing status code and HTTP headers
*/
Response<Void> deleteWithResponse(Boolean recursive, DataLakeRequestConditions requestConditions, Duration timeout,
Context context) {
requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
// Translate the combined request conditions into the lease / modified-access wire models.
LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
ModifiedAccessConditions mac = new ModifiedAccessConditions()
.setIfMatch(requestConditions.getIfMatch())
.setIfNoneMatch(requestConditions.getIfNoneMatch())
.setIfModifiedSince(requestConditions.getIfModifiedSince())
.setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
// Paginated recursive delete is only requested on service version 2023-08-03+, for recursive
// deletes, under token-credential auth. Otherwise the value is null (not false) so the query
// parameter is omitted from the request entirely.
Boolean paginated = (getServiceVersion().ordinal() >= DataLakeServiceVersion.V2023_08_03.ordinal()
&& Boolean.TRUE.equals(recursive)
&& isTokenCredentialAuthenticated()) ? true : null;
Context finalContext = context == null ? Context.NONE : context;
// A paginated delete may require multiple round trips: keep issuing the delete, threading the
// x-ms-continuation token from each response into the next request, until no token is returned.
Callable<ResponseBase<PathsDeleteHeaders, Void>> operation = () -> {
String continuation = null;
ResponseBase<PathsDeleteHeaders, Void> lastResponse;
do {
lastResponse = this.dataLakeStorage.getPaths()
.deleteWithResponse(null, null, recursive, continuation, paginated, lac, mac, finalContext);
continuation = lastResponse.getHeaders().getValue(Transforms.X_MS_CONTINUATION);
} while (continuation != null && !continuation.isEmpty());
// Only the final page's response is surfaced to the caller.
return lastResponse;
};
ResponseBase<PathsDeleteHeaders, Void> response = sendRequest(operation, timeout, DataLakeStorageException.class);
return new SimpleResponse<>(response, null);
}
/**
* Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
* values must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setMetadata
* <pre>
* client.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
// Convenience overload: delegates with no request conditions or timeout and discards the response.
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
* values must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setMetadata
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* client.setMetadataWithResponse&
* new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
// Delegates to the wrapped block blob client, translating the DataLake request conditions to
// their blob equivalents; returnOrConvertException converts blob-layer exceptions on the way
// out (per the helper's name — see DataLakeImplUtils for the exact mapping).
return DataLakeImplUtils.returnOrConvertException(() ->
blockBlobClient.setMetadataWithResponse(metadata, Transforms.toBlobRequestConditions(requestConditions),
timeout, context), LOGGER);
}
/**
* Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
* In order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setHttpHeaders
* <pre>
* client.setHttpHeaders&
* .setContentLanguage&
* .setContentType&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link PathHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(PathHttpHeaders headers) {
// Convenience overload: delegates with no request conditions or timeout and discards the response.
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
* In order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setHttpHeadersWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<Void> response = client.setHttpHeadersWithResponse&
* .setContentLanguage&
* .setContentType&
* System.out.printf&
* response.getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link PathHttpHeaders}
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(PathHttpHeaders headers,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
// Delegates to the wrapped block blob client, translating both the headers and the request
// conditions to their blob equivalents; returnOrConvertException converts blob-layer exceptions
// on the way out (per the helper's name — see DataLakeImplUtils for the exact mapping).
return DataLakeImplUtils.returnOrConvertException(() ->
blockBlobClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
Transforms.toBlobRequestConditions(requestConditions), timeout, context), LOGGER);
}
/**
* Changes the access control list, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlList
* <pre>
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* String group = "group";
* String owner = "owner";
*
* System.out.printf&
* .getLastModified&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlList
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList A list of {@link PathAccessControlEntry} objects.
* @param group The group of the resource.
* @param owner The owner of the resource.
* @return The resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setAccessControlList(List<PathAccessControlEntry> accessControlList, String group, String owner) {
// Delegates to the response-returning overload with no request conditions or timeout.
return setAccessControlListWithResponse(accessControlList, group, owner, null, null, Context.NONE).getValue();
}
/**
* Changes the access control list, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlListWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* String group = "group";
* String owner = "owner";
*
* Response<PathInfo> response = client.setAccessControlListWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlListWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList A list of {@link PathAccessControlEntry} objects.
* @param group The group of the resource.
* @param owner The owner of the resource.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setAccessControlListWithResponse(List<PathAccessControlEntry> accessControlList,
String group, String owner, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
// Shares the generic access-control path with setPermissionsWithResponse; passing null for the
// permissions argument means only the ACL (plus group/owner) is sent.
return setAccessControlWithResponse(accessControlList, null, group, owner,
requestConditions, timeout, context);
}
Response<PathInfo> setAccessControlWithResponse(List<PathAccessControlEntry> accessControlList,
    PathPermissions permissions, String group, String owner, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Normalize optional arguments; either accessControlList or permissions may be null, in which
    // case the corresponding wire field is omitted.
    DataLakeRequestConditions conditions = requestConditions == null
        ? new DataLakeRequestConditions() : requestConditions;
    Context finalContext = context == null ? Context.NONE : context;
    // Serialize the optional inputs into their string wire forms (locals stay effectively final
    // so the lambda below can capture them).
    String permissionsString = permissions == null ? null : permissions.toString();
    String serializedAcl = accessControlList == null
        ? null
        : PathAccessControlEntry.serializeList(accessControlList);
    // Translate the combined conditions into the lease / modified-access wire models.
    LeaseAccessConditions leaseConditions = new LeaseAccessConditions().setLeaseId(conditions.getLeaseId());
    ModifiedAccessConditions modifiedConditions = new ModifiedAccessConditions()
        .setIfMatch(conditions.getIfMatch())
        .setIfNoneMatch(conditions.getIfNoneMatch())
        .setIfModifiedSince(conditions.getIfModifiedSince())
        .setIfUnmodifiedSince(conditions.getIfUnmodifiedSince());
    Callable<ResponseBase<PathsSetAccessControlHeaders, Void>> operation = () -> this.dataLakeStorage.getPaths()
        .setAccessControlWithResponse(null, owner, group, permissionsString, serializedAcl, null,
            leaseConditions, modifiedConditions, finalContext);
    // sendRequest applies the optional timeout and maps failures to DataLakeStorageException.
    ResponseBase<PathsSetAccessControlHeaders, Void> response = sendRequest(operation, timeout,
        DataLakeStorageException.class);
    PathsSetAccessControlHeaders deserializedHeaders = response.getDeserializedHeaders();
    return new SimpleResponse<>(response,
        new PathInfo(deserializedHeaders.getETag(), deserializedHeaders.getLastModified()));
}
/**
* Changes the permissions, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setPermissions
* <pre>
* PathPermissions permissions = new PathPermissions&
* .setGroup&
* .setOwner&
* .setOther&
* String group = "group";
* String owner = "owner";
*
* System.out.printf&
* .getLastModified&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setPermissions
*
* <p>For more information, see the
* <a href="https:
*
* @param permissions {@link PathPermissions}
* @param group The group of the resource.
* @param owner The owner of the resource.
* @return The resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setPermissions(PathPermissions permissions, String group, String owner) {
// Delegates to the response-returning overload with no request conditions or timeout.
return setPermissionsWithResponse(permissions, group, owner, null, null, Context.NONE).getValue();
}
/**
* Changes the permissions, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setPermissionsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathPermissions permissions = new PathPermissions&
* .setGroup&
* .setOwner&
* .setOther&
* String group = "group";
* String owner = "owner";
*
* Response<PathInfo> response = client.setPermissionsWithResponse&
* timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setPermissionsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param permissions {@link PathPermissions}
* @param group The group of the resource.
* @param owner The owner of the resource.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setPermissionsWithResponse(PathPermissions permissions, String group, String owner,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
// Shares the generic access-control path with setAccessControlListWithResponse; passing null for
// the ACL argument means only the POSIX permissions (plus group/owner) are sent.
return setAccessControlWithResponse(null, permissions, group, owner, requestConditions, timeout, context);
}
/**
* Recursively sets the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursive
* <pre>
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.setAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult setAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
// Delegates to the options-based overload with default batching/continuation settings.
return setAccessControlRecursiveWithResponse(new PathSetAccessControlRecursiveOptions(accessControlList), null,
Context.NONE).getValue();
}
/**
* Recursively sets the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathSetAccessControlRecursiveOptions options =
* new PathSetAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.setAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathSetAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
    PathSetAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the ACL once, run the shared recursive-change routine on the async client in SET
    // mode, then block (optionally bounded by the timeout) for the aggregated result.
    String serializedAcl = PathAccessControlEntry.serializeList(options.getAccessControlList());
    Mono<Response<AccessControlChangeResult>> asyncResponse = dataLakePathAsyncClient
        .setAccessControlRecursiveWithResponse(serializedAcl, options.getProgressHandler(),
            PathSetAccessControlRecursiveMode.SET, options.getBatchSize(), options.getMaxBatches(),
            options.isContinueOnFailure(), options.getContinuationToken(), context);
    return StorageImplUtils.blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
* <pre>
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.updateAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult updateAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
// Delegates to the options-based overload with default batching/continuation settings.
return updateAccessControlRecursiveWithResponse(new PathUpdateAccessControlRecursiveOptions(accessControlList),
null, Context.NONE).getValue();
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathUpdateAccessControlRecursiveOptions options =
* new PathUpdateAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.updateAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathUpdateAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> updateAccessControlRecursiveWithResponse(
    PathUpdateAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the ACL once, run the shared recursive-change routine on the async client in
    // MODIFY mode, then block (optionally bounded by the timeout) for the aggregated result.
    String serializedAcl = PathAccessControlEntry.serializeList(options.getAccessControlList());
    Mono<Response<AccessControlChangeResult>> asyncResponse = dataLakePathAsyncClient
        .setAccessControlRecursiveWithResponse(serializedAcl, options.getProgressHandler(),
            PathSetAccessControlRecursiveMode.MODIFY, options.getBatchSize(), options.getMaxBatches(),
            options.isContinueOnFailure(), options.getContinuationToken(), context);
    return StorageImplUtils.blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
* <pre>
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.removeAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult removeAccessControlRecursive(
    List<PathRemoveAccessControlEntry> accessControlList) {
    // Convenience overload: wrap the list in default options and defer to the
    // response-returning variant with no timeout and an empty context.
    PathRemoveAccessControlRecursiveOptions options =
        new PathRemoveAccessControlRecursiveOptions(accessControlList);
    return removeAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathRemoveAccessControlRecursiveOptions options =
* new PathRemoveAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.removeAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathRemoveAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> removeAccessControlRecursiveWithResponse(
    PathRemoveAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Delegate to the async client in REMOVE mode and block for the result,
    // honoring the optional timeout.
    Mono<Response<AccessControlChangeResult>> asyncResponse =
        dataLakePathAsyncClient.setAccessControlRecursiveWithResponse(
            PathRemoveAccessControlEntry.serializeList(options.getAccessControlList()),
            options.getProgressHandler(), PathSetAccessControlRecursiveMode.REMOVE,
            options.getBatchSize(), options.getMaxBatches(), options.isContinueOnFailure(),
            options.getContinuationToken(), context);
    return StorageImplUtils.blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
* <pre>
* PathAccessControl response = client.getAccessControl&
* System.out.printf&
* PathAccessControlEntry.serializeList&
* response.getOwner&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathAccessControl getAccessControl() {
    // Defaults: no UPN resolution, no request conditions, no timeout, empty context.
    Response<PathAccessControl> response =
        getAccessControlWithResponse(false, null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* boolean userPrincipalNameReturned = false;
*
* Response<PathAccessControl> response = client.getAccessControlWithResponse&
* requestConditions, timeout, new Context&
*
* PathAccessControl pac = response.getValue&
*
* System.out.printf&
* PathAccessControlEntry.serializeList&
* pac.getPermissions&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param userPrincipalNameReturned When true, user identity values returned as User Principal Names. When false,
* user identity values returned as Azure Active Directory Object IDs. Default value is false.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathAccessControl> getAccessControlWithResponse(boolean userPrincipalNameReturned,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Normalize the optional arguments before building the service call.
    Context finalContext = context == null ? Context.NONE : context;
    DataLakeRequestConditions conditions =
        requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    // Split the request conditions into the lease and HTTP-conditional pieces the
    // generated layer expects.
    LeaseAccessConditions leaseConditions =
        new LeaseAccessConditions().setLeaseId(conditions.getLeaseId());
    ModifiedAccessConditions modifiedConditions = new ModifiedAccessConditions()
        .setIfMatch(conditions.getIfMatch())
        .setIfNoneMatch(conditions.getIfNoneMatch())
        .setIfModifiedSince(conditions.getIfModifiedSince())
        .setIfUnmodifiedSince(conditions.getIfUnmodifiedSince());
    // GET_ACCESS_CONTROL returns the ACL, permissions, owner and group in headers only.
    Callable<ResponseBase<PathsGetPropertiesHeaders, Void>> operation =
        () -> this.dataLakeStorage.getPaths().getPropertiesWithResponse(null, null,
            PathGetPropertiesAction.GET_ACCESS_CONTROL, userPrincipalNameReturned,
            leaseConditions, modifiedConditions, finalContext);
    ResponseBase<PathsGetPropertiesHeaders, Void> response =
        sendRequest(operation, timeout, DataLakeStorageException.class);
    PathsGetPropertiesHeaders headers = response.getDeserializedHeaders();
    PathAccessControl accessControl = new PathAccessControl(
        PathAccessControlEntry.parseList(headers.getXMsAcl()),
        PathPermissions.parseSymbolic(headers.getXMsPermissions()),
        headers.getXMsGroup(), headers.getXMsOwner());
    return new SimpleResponse<>(response, accessControl);
}
// Package-private sync implementation of rename. The rename is expressed as a
// "create with rename source" call in LEGACY mode: destination conditions ride on the
// create request while source conditions travel in SourceModifiedAccessConditions.
// Returns a client bound to the new location.
Response<DataLakePathClient> renameWithResponseWithTimeout(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    // Normalize the optional arguments.
    Context finalContext = context == null ? Context.NONE : context;
    destinationRequestConditions = destinationRequestConditions == null ? new DataLakeRequestConditions()
        : destinationRequestConditions;
    DataLakeRequestConditions finalSourceRequestConditions = sourceRequestConditions == null ? new DataLakeRequestConditions()
        : sourceRequestConditions;
    // Conditions evaluated against the source path.
    SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions()
        .setSourceIfModifiedSince(finalSourceRequestConditions.getIfModifiedSince())
        .setSourceIfUnmodifiedSince(finalSourceRequestConditions.getIfUnmodifiedSince())
        .setSourceIfMatch(finalSourceRequestConditions.getIfMatch())
        .setSourceIfNoneMatch(finalSourceRequestConditions.getIfNoneMatch());
    // Conditions evaluated against the destination path.
    LeaseAccessConditions destLac = new LeaseAccessConditions()
        .setLeaseId(destinationRequestConditions.getLeaseId());
    ModifiedAccessConditions destMac = new ModifiedAccessConditions()
        .setIfMatch(destinationRequestConditions.getIfMatch())
        .setIfNoneMatch(destinationRequestConditions.getIfNoneMatch())
        .setIfModifiedSince(destinationRequestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(destinationRequestConditions.getIfUnmodifiedSince());
    DataLakePathClient dataLakePathClient = getPathClient(destinationFileSystem, destinationPath);
    // The service expects the rename source as "/<file system>/<url-encoded path>".
    String renameSource = "/" + this.getFileSystemName() + "/" + Utility.urlEncode(pathName);
    // When this client authenticates with a SAS, append it to the rename source so the
    // service can read the source; strip a leading '?' before re-appending.
    String signature = null;
    if (this.getSasToken() != null) {
        if (this.getSasToken().getSignature().startsWith("?")) {
            signature = this.getSasToken().getSignature().substring(1);
        } else {
            signature = this.getSasToken().getSignature();
        }
    }
    String finalRenameSource = signature != null ? renameSource + "?" + signature : renameSource;
    Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
        dataLakePathClient.dataLakeStorage.getPaths().createWithResponse(null /* request id */, null /* timeout */,
            null /* pathResourceType */, null /* continuation */, PathRenameMode.LEGACY, finalRenameSource,
            finalSourceRequestConditions.getLeaseId(), null /* properties */, null /* permissions */,
            null /* umask */, null /* owner */, null /* group */, null /* acl */, null /* proposedLeaseId */,
            null /* leaseDuration */, null /* expiryOptions */, null /* expiresOn */,
            null /* encryptionContext */, null /* pathHttpHeaders */, destLac, destMac, sourceConditions,
            null /* cpkInfo */, finalContext);
    ResponseBase<PathsCreateHeaders, Void> response = sendRequest(operation, timeout,
        DataLakeStorageException.class);
    return new SimpleResponse<>(response, dataLakePathClient);
}
/**
 * Takes in a destination and creates a DataLakePathClient with a new path.
 *
 * @param destinationFileSystem The destination file system; {@code null} means "reuse this
 * client's file system".
 * @param destinationPath The destination path; must be non-null and non-empty.
 * @return A DataLakePathClient pointed at the destination.
 * @throws IllegalArgumentException If {@code destinationPath} is null or empty.
 */
DataLakePathClient getPathClient(String destinationFileSystem, String destinationPath) {
    // Default to this client's file system when none is supplied (local, no param reassignment).
    String fileSystem = destinationFileSystem == null ? getFileSystemName() : destinationFileSystem;
    if (CoreUtils.isNullOrEmpty(destinationPath)) {
        // isNullOrEmpty rejects both null and empty, so the message must say so.
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'destinationPath' cannot be null or empty."));
    }
    return new DataLakePathClient(dataLakePathAsyncClient,
        dataLakePathAsyncClient.prepareBuilderReplacePath(fileSystem, destinationPath).buildBlockBlobClient(),
        getHttpPipeline(), getAccountUrl(), serviceVersion, accountName, fileSystem, destinationPath,
        pathResourceType, sasToken, customerProvidedKey, isTokenCredentialAuthenticated());
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
* <pre>
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties() {
    // No request conditions, no timeout, empty context.
    Response<PathProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties
* <pre>
* PathGetPropertiesOptions options = new PathGetPropertiesOptions&
*
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties(PathGetPropertiesOptions options) {
    // Options-driven overload; no timeout, empty context.
    Response<PathProperties> response =
        getPropertiesUsingOptionsWithResponse(options, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<PathProperties> response = client.getPropertiesWithResponse&
* new Context&
*
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> getPropertiesWithResponse(DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the blob-surface client; returnOrConvertException translates any
    // blob-layer exception into its Data Lake equivalent.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> blobResponse = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
        PathProperties properties = Transforms.toPathProperties(blobResponse.getValue(), blobResponse);
        return new SimpleResponse<>(blobResponse, properties);
    }, LOGGER);
}
/**
* Returns the resource's metadata and properties.
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Response<PathProperties> getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options, Duration timeout,
    Context context) {
    // 'options' may be null (the UPN lambda below already guarded it, but the later
    // getRequestConditions() dereference did not) — normalize once so no read can NPE.
    PathGetPropertiesOptions finalOptions = options == null ? new PathGetPropertiesOptions() : options;
    // Attach the UPN header only when the caller asked for user principal names.
    context = BuilderHelper.addUpnHeader(() -> finalOptions.isUserPrincipalName(), context);
    Context finalContext = context;
    // Delegate to the blob-surface client; returnOrConvertException translates any
    // blob-layer exception into its Data Lake equivalent.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(finalOptions.getRequestConditions()), timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.exists -->
*
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Unwrap the response-returning variant with default arguments.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // The existence check is serviced by the blob-surface client; any blob-layer
    // exception is converted to its Data Lake equivalent.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.existsWithResponse(timeout, context), LOGGER);
}
// Package-private accessor for the underlying blob-surface client.
BlockBlobClient getBlockBlobClient() {
    return blockBlobClient;
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Default the account name to this client's account and use an empty context.
    return generateUserDelegationSas(dataLakeServiceSasSignatureValues, userDelegationKey,
        getAccountName(), Context.NONE);
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // A directory resource toggles the SAS implementation into directory mode.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasImplUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasImplUtil.generateUserDelegationSas(userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
// Single-argument convenience overload: generate the SAS with an empty context.
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues) {
    return generateSas(dataLakeServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* &
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues, Context context) {
    // A directory resource toggles the SAS implementation into directory mode; signing
    // uses the shared-key credential extracted from this client's pipeline.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasImplUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasImplUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} |
Is there anything we could do to help share logic between the sync and async clients so when we make an update both are updated? | public Response<PathInfo> createWithResponse(DataLakePathCreateOptions options, Duration timeout, Context context) {
    // 'options' may be null; every subsequent read must go through the null-safe copy —
    // the original body read 'options' directly, so a null argument threw
    // NullPointerException despite the default below.
    DataLakePathCreateOptions finalOptions = options == null ? new DataLakePathCreateOptions() : options;
    DataLakeRequestConditions requestConditions = finalOptions.getRequestConditions() == null
        ? new DataLakeRequestConditions() : finalOptions.getRequestConditions();
    // Split the request conditions into the lease and HTTP-conditional pieces the
    // generated layer expects.
    LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
    ModifiedAccessConditions mac = new ModifiedAccessConditions()
        .setIfMatch(requestConditions.getIfMatch())
        .setIfNoneMatch(requestConditions.getIfNoneMatch())
        .setIfModifiedSince(requestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
    // ACL entries are sent to the service as a single serialized string.
    String acl = finalOptions.getAccessControlList() != null
        ? PathAccessControlEntry.serializeList(finalOptions.getAccessControlList()) : null;
    PathExpiryOptions expiryOptions = ModelHelper.setFieldsIfNull(finalOptions, pathResourceType);
    // Expiry is either an absolute RFC1123 timestamp or a relative time-to-expire in ms.
    String expiresOnString = null;
    if (finalOptions.getScheduleDeletionOptions() != null && finalOptions.getScheduleDeletionOptions().getExpiresOn() != null) {
        expiresOnString = DateTimeRfc1123.toRfc1123String(finalOptions.getScheduleDeletionOptions().getExpiresOn());
    } else if (finalOptions.getScheduleDeletionOptions() != null && finalOptions.getScheduleDeletionOptions().getTimeToExpire() != null) {
        expiresOnString = Long.toString(finalOptions.getScheduleDeletionOptions().getTimeToExpire().toMillis());
    }
    String finalExpiresOnString = expiresOnString;
    Long leaseDuration = finalOptions.getLeaseDuration() != null ? Long.valueOf(finalOptions.getLeaseDuration()) : null;
    Context finalContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
        this.dataLakeStorage.getPaths().createWithResponse(null, null, pathResourceType, null, null, null,
            finalOptions.getSourceLeaseId(), ModelHelper.buildMetadataString(finalOptions.getMetadata()),
            finalOptions.getPermissions(), finalOptions.getUmask(), finalOptions.getOwner(),
            finalOptions.getGroup(), acl, finalOptions.getProposedLeaseId(), leaseDuration, expiryOptions,
            finalExpiresOnString, finalOptions.getEncryptionContext(), finalOptions.getPathHttpHeaders(),
            lac, mac, null, customerProvidedKey, finalContext);
    ResponseBase<PathsCreateHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout,
        DataLakeStorageException.class);
    // Surface the created path's ETag, last-modified and encryption details.
    return new SimpleResponse<>(response, new PathInfo(response.getDeserializedHeaders().getETag(),
        response.getDeserializedHeaders().getLastModified(),
        response.getDeserializedHeaders().isXMsRequestServerEncrypted() != null,
        response.getDeserializedHeaders().getXMsEncryptionKeySha256()));
} | LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId()); | public Response<PathInfo> createWithResponse(DataLakePathCreateOptions options, Duration timeout, Context context) {
    // 'options' may be null; normalize once and read only the null-safe copy below.
    DataLakePathCreateOptions finalOptions = options == null ? new DataLakePathCreateOptions() : options;
    DataLakeRequestConditions requestConditions = finalOptions.getRequestConditions() == null
        ? new DataLakeRequestConditions() : finalOptions.getRequestConditions();
    // Split the request conditions into the lease and HTTP-conditional pieces the
    // generated layer expects.
    LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
    ModifiedAccessConditions mac = new ModifiedAccessConditions()
        .setIfMatch(requestConditions.getIfMatch())
        .setIfNoneMatch(requestConditions.getIfNoneMatch())
        .setIfModifiedSince(requestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
    // ACL entries travel as a single serialized string.
    String acl = finalOptions.getAccessControlList() != null ? PathAccessControlEntry
        .serializeList(finalOptions.getAccessControlList()) : null;
    PathExpiryOptions expiryOptions = ModelHelper.setFieldsIfNull(finalOptions, pathResourceType);
    // Expiry is either an absolute RFC1123 timestamp or a relative time-to-expire in ms.
    String expiresOnString = null;
    if (finalOptions.getScheduleDeletionOptions() != null && finalOptions.getScheduleDeletionOptions().getExpiresOn() != null) {
        expiresOnString = DateTimeRfc1123.toRfc1123String(finalOptions.getScheduleDeletionOptions().getExpiresOn());
    } else if (finalOptions.getScheduleDeletionOptions() != null && finalOptions.getScheduleDeletionOptions().getTimeToExpire() != null) {
        expiresOnString = Long.toString(finalOptions.getScheduleDeletionOptions().getTimeToExpire().toMillis());
    }
    String finalExpiresOnString = expiresOnString;
    Long leaseDuration = finalOptions.getLeaseDuration() != null ? Long.valueOf(finalOptions.getLeaseDuration()) : null;
    Context finalContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
        this.dataLakeStorage.getPaths().createWithResponse(null, null, pathResourceType, null, null, null,
            finalOptions.getSourceLeaseId(), ModelHelper.buildMetadataString(finalOptions.getMetadata()),
            finalOptions.getPermissions(), finalOptions.getUmask(), finalOptions.getOwner(),
            finalOptions.getGroup(), acl, finalOptions.getProposedLeaseId(), leaseDuration, expiryOptions,
            finalExpiresOnString, finalOptions.getEncryptionContext(), finalOptions.getPathHttpHeaders(), lac, mac,
            null, customerProvidedKey, finalContext);
    ResponseBase<PathsCreateHeaders, Void> response = sendRequest(operation, timeout,
        DataLakeStorageException.class);
    // Surface the created path's ETag, last-modified and encryption details.
    return new SimpleResponse<>(response, new PathInfo(
        response.getDeserializedHeaders().getETag(),
        response.getDeserializedHeaders().getLastModified(),
        response.getDeserializedHeaders().isXMsRequestServerEncrypted() != null,
        response.getDeserializedHeaders().getXMsEncryptionKeySha256()));
} | class DataLakePathClient {
private static final ClientLogger LOGGER = new ClientLogger(DataLakePathClient.class);
// Async counterpart; sync operations delegate to it where a shared implementation exists.
final DataLakePathAsyncClient dataLakePathAsyncClient;
// Blob-surface client used for property/existence operations on the same resource.
final BlockBlobClient blockBlobClient;
// Generated REST client bound to this path on the dfs endpoint.
final AzureDataLakeStorageRestAPIImpl dataLakeStorage;
// Generated REST client scoped to the containing file system (no path component).
final AzureDataLakeStorageRestAPIImpl fileSystemDataLakeStorage;
// Generated REST client bound to this path on the blob endpoint.
final AzureDataLakeStorageRestAPIImpl blobDataLakeStorage;
private final String accountName;
private final String fileSystemName;
// Path within the file system; URL-encoded when building URLs (see getPathUrl).
final String pathName;
private final DataLakeServiceVersion serviceVersion;
// May be null when no customer-provided key is configured (see getCustomerProvidedKeyClient).
private final CpkInfo customerProvidedKey;
// FILE or DIRECTORY; used for directory-mode SAS generation and create defaults.
final PathResourceType pathResourceType;
// SAS credential when this client authenticates with a SAS; may be null.
private final AzureSasCredential sasToken;
private final boolean isTokenCredentialAuthenticated;
/**
 * Package-private constructor. Wires the sync client to its async counterpart and builds
 * three generated REST clients: one addressing this path on the given (dfs) endpoint, one
 * addressing it on the derived blob endpoint, and one scoped to the file system only.
 */
DataLakePathClient(DataLakePathAsyncClient dataLakePathAsyncClient, BlockBlobClient blockBlobClient,
    HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName,
    String fileSystemName, String pathName, PathResourceType pathResourceType, AzureSasCredential sasToken,
    CpkInfo customerProvidedKey, boolean isTokenCredentialAuthenticated) {
    this.dataLakePathAsyncClient = dataLakePathAsyncClient;
    this.blockBlobClient = blockBlobClient;
    this.accountName = accountName;
    this.fileSystemName = fileSystemName;
    this.pathName = pathName;
    this.pathResourceType = pathResourceType;
    this.sasToken = sasToken;
    // REST client addressing this path on the given endpoint.
    this.dataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(url)
        .fileSystem(fileSystemName)
        .path(this.pathName)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.serviceVersion = serviceVersion;
    // Derive the sibling blob endpoint from the dfs endpoint for blob-surface calls.
    String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(url, "blob", "dfs");
    this.blobDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(blobUrl)
        .fileSystem(fileSystemName)
        .path(this.pathName)
        .version(serviceVersion.getVersion())
        .buildClient();
    // REST client scoped to the containing file system (no path component).
    this.fileSystemDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(url)
        .fileSystem(fileSystemName)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.customerProvidedKey = customerProvidedKey;
    this.isTokenCredentialAuthenticated = isTokenCredentialAuthenticated;
}
/**
* Gets the URL of the storage account.
*
* @return the URL.
*/
// Package-private: the raw storage endpoint URL backing this client.
String getAccountUrl() {
    return dataLakeStorage.getUrl();
}
/**
* Gets the URL of the object represented by this client on the Data Lake service.
*
* @return the URL.
*/
String getPathUrl() {
    // <endpoint>/<file system>/<url-encoded path>
    String encodedPath = Utility.urlEncode(pathName);
    return String.join("/", dataLakeStorage.getUrl(), fileSystemName, encodedPath);
}
/**
* Gets the associated account name.
*
* @return Account name associated with this storage resource.
*/
// Simple accessor; the account name is fixed at construction time.
public String getAccountName() {
    return accountName;
}
/**
 * Gets the name of the File System in which this object lives.
 *
 * @return The name of the File System.
 */
public String getFileSystemName() {
    return this.fileSystemName;
}
/**
 * Gets the path of this object, not including the name of the resource itself.
 *
 * @return The path of the object.
 */
String getObjectPath() {
    return this.pathName;
}
/**
 * Gets the name of this object, not including its full path.
 *
 * @return The name of the object.
 */
String getObjectName() {
    // split("/") discards trailing empty segments, so a path ending in '/' still
    // resolves to its last real segment.
    String[] segments = getObjectPath().split("/");
    return segments[segments.length - 1];
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    // All three generated clients share one pipeline; the DFS client is the canonical source.
    return this.dataLakeStorage.getHttpPipeline();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public DataLakeServiceVersion getServiceVersion() {
    return this.serviceVersion;
}
/**
 * Gets the SAS credential this client was constructed with, if any.
 *
 * @return the {@link AzureSasCredential}, possibly {@code null}.
 */
AzureSasCredential getSasToken() {
    return sasToken;
}
/**
 * Gets the {@link CustomerProvidedKey} used to encrypt this path's content on the server.
 *
 * @return the customer provided key used for encryption, or {@code null} if none is configured.
 */
public CustomerProvidedKey getCustomerProvidedKey() {
    // The client may legitimately be constructed without a CPK (the constructor accepts null),
    // in which case the previous unconditional dereference threw NullPointerException.
    if (customerProvidedKey == null) {
        return null;
    }
    return new CustomerProvidedKey(customerProvidedKey.getEncryptionKey());
}
/**
 * Gets the raw implementation-layer {@link CpkInfo}, if any.
 *
 * @return the {@link CpkInfo}, possibly {@code null}.
 */
CpkInfo getCpkInfo() {
    return customerProvidedKey;
}
/**
 * Indicates whether the pipeline authenticates with a token credential.
 *
 * @return {@code true} when token-credential authentication is in use.
 */
boolean isTokenCredentialAuthenticated() {
    return isTokenCredentialAuthenticated;
}
/**
 * Creates a new {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the path, pass {@code null} to use no
 * customer provided key.
 * @return a {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
 */
public DataLakePathClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Translate the public CPK model into the implementation-layer CpkInfo; null stays null.
    CpkInfo cpkInfo = (customerProvidedKey == null)
        ? null
        : new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    // The blob-layer client must carry the equivalent key so both endpoints encrypt consistently.
    BlockBlobClient cpkBlobClient =
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey));
    return new DataLakePathClient(dataLakePathAsyncClient, cpkBlobClient, getHttpPipeline(), getAccountUrl(),
        getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath(), this.pathResourceType,
        getSasToken(), cpkInfo, isTokenCredentialAuthenticated());
}
/**
 * Creates a resource. By default, this method will not overwrite an existing path.
 *
 * <p>For more information see the Azure Docs.</p>
 *
 * @return Information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create() {
    // Delegate to the overwrite-aware overload with overwriting disabled.
    return create(false);
}
/**
 * Creates a resource.
 *
 * <p>For more information see the Azure Docs.</p>
 *
 * @param overwrite Whether to overwrite, should data exist on the path.
 *
 * @return Information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create(boolean overwrite) {
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // An If-None-Match: * precondition makes the service reject the call when the path
        // already exists, which is how "no overwrite" is enforced.
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setRequestConditions(conditions);
    return createWithResponse(options, null, Context.NONE).getValue();
}
/**
 * Creates a resource.
 *
 * <p>For more information see the Azure Docs.</p>
 *
 * @param permissions POSIX access permissions for the resource owner, the resource owning group, and others.
 * @param umask Restricts permissions of the resource to be created.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing information about the created resource
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createWithResponse(String permissions, String umask, PathHttpHeaders headers,
    Map<String, String> metadata, DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fold the individual arguments into the options bag accepted by the canonical overload.
    return createWithResponse(
        new DataLakePathCreateOptions()
            .setPermissions(permissions)
            .setUmask(umask)
            .setPathHttpHeaders(headers)
            .setMetadata(metadata)
            .setRequestConditions(requestConditions),
        timeout, context);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Map<String, String> metadata = Collections.singletonMap&
* String permissions = "permissions";
* String umask = "umask";
* String owner = "rwx";
* String group = "r--";
* String leaseId = CoreUtils.randomUuid&
* Integer duration = 15;
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setOwner&
* .setGroup&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setMetadata&
* .setProposedLeaseId&
* .setLeaseDuration&
*
* Response<PathInfo> response = client.createWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathCreateOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
 * Creates a resource if a path does not exist.
 *
 * <p>For more information see the Azure Docs.</p>
 *
 * @return {@link PathInfo} that contains information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo createIfNotExists() {
    // Default options; existence handling is done by the WithResponse overload.
    DataLakePathCreateOptions defaults = new DataLakePathCreateOptions();
    return createIfNotExistsWithResponse(defaults, null, null).getValue();
}
/**
 * Creates a resource if a path does not exist.
 *
 * <p>For more information see the Azure Docs.</p>
 *
 * @param options {@link DataLakePathCreateOptions}; {@code null} is treated as default options.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} whose {@code value} is a {@link PathInfo} with information about the
 * resource. If the status code is 201, a new resource was successfully created. If the status code is
 * 409, a resource already existed at this location and the value is {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createIfNotExistsWithResponse(DataLakePathCreateOptions options, Duration timeout,
    Context context) {
    // Tolerate a null options bag, mirroring deleteIfExistsWithResponse.
    options = options == null ? new DataLakePathCreateOptions() : options;
    try {
        return createWithResponse(options, timeout, context);
    } catch (DataLakeStorageException e) {
        // A 409 with RESOURCE_ALREADY_EXISTS is the expected "already there" case: map it to a
        // non-exceptional response with a null value. Compare constant-first so a missing error
        // code cannot cause an NPE while classifying the failure.
        if (e.getStatusCode() == 409
            && BlobErrorCode.RESOURCE_ALREADY_EXISTS.toString().equals(e.getErrorCode())) {
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), null);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Deletes paths under the resource if it exists.
 *
 * <p>For more information see the Azure Docs.</p>
 *
 * @return {@code true} if the resource is successfully deleted, {@code false} if resource does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    DataLakePathDeleteOptions defaults = new DataLakePathDeleteOptions();
    return deleteIfExistsWithResponse(defaults, null, null).getValue();
}
/**
 * Deletes all paths under the specified resource if it exists.
 *
 * <p>For more information see the Azure Docs.</p>
 *
 * @param options {@link DataLakePathDeleteOptions}; {@code null} is treated as default options.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200,
 * the resource was successfully deleted. If status code is 404, the resource does not exist and the
 * value is {@code false}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    options = options == null ? new DataLakePathDeleteOptions() : options;
    try {
        Response<Void> response = this.deleteWithResponse(options.getIsRecursive(), options.getRequestConditions(),
            timeout, context);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), true);
    } catch (DataLakeStorageException e) {
        // A 404 with RESOURCE_NOT_FOUND is the expected "nothing to delete" case. Compare
        // constant-first so a null error code cannot cause an NPE while classifying the failure.
        if (e.getStatusCode() == 404
            && BlobErrorCode.RESOURCE_NOT_FOUND.toString().equals(e.getErrorCode())) {
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    }
}
/**
 * Package-private delete method for use by {@link DataLakeFileClient} and {@link DataLakeDirectoryClient}.
 *
 * @param recursive Whether to delete all paths beneath the directory; may be {@code null}.
 * @param requestConditions {@link DataLakeRequestConditions}; {@code null} is treated as no conditions.
 * @param timeout An optional timeout applied to the whole (possibly multi-request) operation.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} containing status code and HTTP headers of the final service response.
 */
Response<Void> deleteWithResponse(Boolean recursive, DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    // Split the convenience conditions into the lease / modified-time objects the generated layer expects.
    LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
    ModifiedAccessConditions mac = new ModifiedAccessConditions()
        .setIfMatch(requestConditions.getIfMatch())
        .setIfNoneMatch(requestConditions.getIfNoneMatch())
        .setIfModifiedSince(requestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
    // Paginated (continuation-token) deletes are only requested for recursive deletes on service
    // versions >= 2023-08-03 when authenticated with a token credential; otherwise the parameter
    // must be omitted (null), not false.
    Boolean paginated = (getServiceVersion().ordinal() >= DataLakeServiceVersion.V2023_08_03.ordinal()
        && Boolean.TRUE.equals(recursive)
        && isTokenCredentialAuthenticated()) ? true : null;
    Context finalContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<PathsDeleteHeaders, Void>> operation = () -> {
        String continuation = null;
        ResponseBase<PathsDeleteHeaders, Void> lastResponse;
        // A recursive delete may span multiple requests: keep issuing deletes while the service
        // returns a non-empty x-ms-continuation header, feeding each token into the next call.
        do {
            lastResponse = this.dataLakeStorage.getPaths()
                .deleteWithResponse(null, null, recursive, continuation, paginated, lac, mac, finalContext);
            continuation = lastResponse.getHeaders().getValue(Transforms.X_MS_CONTINUATION);
        } while (continuation != null && !continuation.isEmpty());
        return lastResponse;
    };
    // sendRequest applies the timeout across the whole loop and maps failures to DataLakeStorageException.
    ResponseBase<PathsDeleteHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
    return new SimpleResponse<>(response, null);
}
/**
 * Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
 * values must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // No conditions, no timeout; response is discarded.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
 * values must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Metadata is served by the blob layer; returnOrConvertException maps blob exceptions to
    // their Data Lake equivalents.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.setMetadataWithResponse(metadata,
            Transforms.toBlobRequestConditions(requestConditions), timeout, context),
        LOGGER);
}
/**
 * Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
 * In order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link PathHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(PathHttpHeaders headers) {
    // No conditions, no timeout; response is discarded.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
 * In order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param headers {@link PathHttpHeaders}
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(PathHttpHeaders headers,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // HTTP headers are served by the blob layer; Transforms converts both the headers and the
    // conditions, and returnOrConvertException maps blob exceptions to Data Lake ones.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
            Transforms.toBlobRequestConditions(requestConditions), timeout, context),
        LOGGER);
}
/**
 * Changes the access control list, group and/or owner for a resource.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param accessControlList A list of {@link PathAccessControlEntry} objects.
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @return The resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setAccessControlList(List<PathAccessControlEntry> accessControlList, String group, String owner) {
    return setAccessControlListWithResponse(accessControlList, group, owner, null, null, Context.NONE)
        .getValue();
}
/**
 * Changes the access control list, group and/or owner for a resource.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param accessControlList A list of {@link PathAccessControlEntry} objects.
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setAccessControlListWithResponse(List<PathAccessControlEntry> accessControlList,
    String group, String owner, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // ACL variant: permissions are left null so only the access control list is applied.
    return setAccessControlWithResponse(accessControlList, null, group, owner, requestConditions, timeout,
        context);
}
/**
 * Shared implementation behind {@code setAccessControlListWithResponse} and
 * {@code setPermissionsWithResponse}; callers pass exactly one of {@code accessControlList} or
 * {@code permissions} non-null.
 *
 * @param accessControlList ACL entries to apply, or {@code null} when setting permissions instead.
 * @param permissions POSIX permissions to apply, or {@code null} when setting an ACL instead.
 * @param group The group of the resource; {@code null} leaves it unchanged.
 * @param owner The owner of the resource; {@code null} leaves it unchanged.
 * @param requestConditions {@link DataLakeRequestConditions}; {@code null} means no conditions.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info (ETag and Last-Modified).
 */
Response<PathInfo> setAccessControlWithResponse(List<PathAccessControlEntry> accessControlList,
    PathPermissions permissions, String group, String owner, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    // Split the convenience conditions into the lease / modified-time objects the generated layer expects.
    LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
    ModifiedAccessConditions mac = new ModifiedAccessConditions()
        .setIfMatch(requestConditions.getIfMatch())
        .setIfNoneMatch(requestConditions.getIfNoneMatch())
        .setIfModifiedSince(requestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
    // Serialize whichever of the two inputs was supplied; the other is sent as null.
    String permissionsString = permissions == null ? null : permissions.toString();
    String accessControlListString =
        accessControlList == null
            ? null
            : PathAccessControlEntry.serializeList(accessControlList);
    Context finalContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<PathsSetAccessControlHeaders, Void>> operation = () -> this.dataLakeStorage.getPaths()
        .setAccessControlWithResponse(null, owner, group, permissionsString, accessControlListString, null, lac,
            mac, finalContext);
    ResponseBase<PathsSetAccessControlHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout,
        DataLakeStorageException.class);
    // Only the ETag and Last-Modified headers are surfaced to the caller.
    return new SimpleResponse<>(response, new PathInfo(response.getDeserializedHeaders().getETag(),
        response.getDeserializedHeaders().getLastModified()));
}
/**
 * Changes the permissions, group and/or owner for a resource.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param permissions {@link PathPermissions}
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @return The resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setPermissions(PathPermissions permissions, String group, String owner) {
    return setPermissionsWithResponse(permissions, group, owner, null, null, Context.NONE)
        .getValue();
}
/**
 * Changes the permissions, group and/or owner for a resource.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param permissions {@link PathPermissions}
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setPermissionsWithResponse(PathPermissions permissions, String group, String owner,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Permissions variant: the ACL argument is left null so only permissions are applied.
    return setAccessControlWithResponse(null, permissions, group, owner, requestConditions, timeout,
        context);
}
/**
 * Recursively sets the access control on a path and all subpaths.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param accessControlList The POSIX access control list for the file or directory.
 * @return The result of the operation.
 *
 * @throws DataLakeAclChangeFailedException if a request to storage throws a
 * {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult setAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    PathSetAccessControlRecursiveOptions options = new PathSetAccessControlRecursiveOptions(accessControlList);
    return setAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
 * Recursively sets the access control on a path and all subpaths.
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link PathSetAccessControlRecursiveOptions}; must not be {@code null}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the result of the operation.
 *
 * @throws DataLakeAclChangeFailedException if a request to storage throws a
 * {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
    PathSetAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Fail fast with a descriptive message instead of an anonymous NPE on options.getAccessControlList().
    StorageImplUtils.assertNotNull("options", options);
    return setAccessControlRecursiveWithResponse(
        PathAccessControlEntry.serializeList(options.getAccessControlList()),
        options.getProgressHandler(), PathSetAccessControlRecursiveMode.SET, options.getBatchSize(),
        options.getMaxBatches(), options.isContinueOnFailure(), options.getContinuationToken(), timeout, context);
}
/**
 * Package-private core of the recursive ACL operations. The {@code mode} parameter selects
 * set/modify/remove behavior so the public entry points can share this implementation.
 *
 * @param accessControlList Serialized ACL string to apply; must not be {@code null}.
 * @param progressHandler Optional callback invoked once per processed batch.
 * @param mode Whether entries are set, modified, or removed.
 * @param batchSize Optional maximum number of paths per request.
 * @param maxBatches Optional maximum number of batches to process before returning.
 * @param continueOnFailure Whether to keep going when individual paths fail.
 * @param continuationToken Token to resume a previously interrupted operation, or {@code null}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the aggregated result of the operation.
 */
Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
    String accessControlList, Consumer<Response<AccessControlChanges>> progressHandler,
    PathSetAccessControlRecursiveMode mode, Integer batchSize, Integer maxBatches, Boolean continueOnFailure,
    String continuationToken, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("accessControlList", accessControlList);
    context = context == null ? Context.NONE : context;
    Context contextFinal = context;
    // Mutable aggregate counters shared with the helper, which accumulates across batches.
    AtomicInteger directoriesSuccessfulCount = new AtomicInteger(0);
    AtomicInteger filesSuccessfulCount = new AtomicInteger(0);
    AtomicInteger failureCount = new AtomicInteger(0);
    AtomicInteger batchesCount = new AtomicInteger(0);
    try {
        // Issue the first batch; the helper decides whether to continue with further batches.
        Callable<ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse>> operation = () ->
            this.dataLakeStorage.getPaths().setAccessControlRecursiveWithResponse(mode, null,
                continuationToken, continueOnFailure, batchSize, accessControlList, null, contextFinal);
        ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response =
            StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
        return setAccessControlRecursiveWithResponseHelper(response, maxBatches, directoriesSuccessfulCount,
            filesSuccessfulCount, failureCount, batchesCount, progressHandler, accessControlList, mode, batchSize,
            continueOnFailure, continuationToken, null, timeout, contextFinal);
    } catch (Exception e) {
        // Wrap every failure with the continuation token so callers can resume the operation.
        if (e instanceof DataLakeStorageException) {
            throw LOGGER.logExceptionAsError(ModelHelper.changeAclRequestFailed((DataLakeStorageException) e,
                continuationToken));
        } else {
            throw LOGGER.logExceptionAsError(ModelHelper.changeAclFailed(e, continuationToken));
        }
    }
}
/**
 * Processes one batch response of a recursive access-control change and, while the service keeps
 * returning a continuation token (and {@code maxBatches} has not been reached), requests the next
 * batch and recurses on its response.
 *
 * @param response The service response for the batch just completed.
 * @param maxBatches Optional cap on the number of batches to process before returning.
 * @param directoriesSuccessfulCount Running total of directories changed across all batches.
 * @param filesSuccessfulCount Running total of files changed across all batches.
 * @param failureCount Running total of entries that failed across all batches.
 * @param batchesCount Number of batches processed so far (incremented here for this batch).
 * @param progressHandler Optional callback invoked once per batch with that batch's changes.
 * @param accessControlStr Serialized ACL string sent to the service.
 * @param mode Whether the ACL is being set, modified or removed.
 * @param batchSize Optional maximum number of paths the service should process per batch.
 * @param continueOnFailure Whether the service should keep going after per-entry failures.
 * @param lastToken Continuation token used to request this batch; reported back to the caller when
 * the operation stops on failures without issuing a new token.
 * @param batchFailures Failed entries captured from the first failing batch, carried through the
 * recursion unchanged once set.
 * @param timeout Optional timeout applied to each individual service call.
 * @param context Additional context passed through the HTTP pipeline.
 * @return The final aggregated result once no continuation token remains or the batch cap is hit.
 */
Response<AccessControlChangeResult> setAccessControlRecursiveWithResponseHelper(
    ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response,
    Integer maxBatches, AtomicInteger directoriesSuccessfulCount, AtomicInteger filesSuccessfulCount,
    AtomicInteger failureCount, AtomicInteger batchesCount,
    Consumer<Response<AccessControlChanges>> progressHandler, String accessControlStr,
    PathSetAccessControlRecursiveMode mode, Integer batchSize, Boolean continueOnFailure, String lastToken,
    List<AccessControlChangeFailure> batchFailures, Duration timeout, Context context) {
    // Fold this batch into the aggregate counters.
    batchesCount.incrementAndGet();
    directoriesSuccessfulCount.addAndGet(response.getValue().getDirectoriesSuccessful());
    filesSuccessfulCount.addAndGet(response.getValue().getFilesSuccessful());
    failureCount.addAndGet(response.getValue().getFailureCount());
    // Capture failed entries from the first batch that reports failures only; once non-null,
    // batchFailures is carried through the recursion unchanged.
    if (failureCount.get() > 0 && batchFailures == null) {
        batchFailures = response.getValue().getFailedEntries()
            .stream()
            .map(aclFailedEntry -> new AccessControlChangeFailure()
                .setDirectory(aclFailedEntry.getType().equals("DIRECTORY"))
                .setName(aclFailedEntry.getName())
                .setErrorMessage(aclFailedEntry.getErrorMessage())
            ).collect(Collectors.toList());
    }
    // Effectively-final copy so it can be passed into the recursive call after the lambda below.
    List<AccessControlChangeFailure> finalBatchFailures = batchFailures;
    /*
    Determine which token we should report/return/use next.
    If there was a token present on the response (still processing and either no errors or forceFlag set),
    use that one.
    If there were no failures or force flag set and still nothing present, we are at the end, so use that.
    If there were failures and no force flag set, use the last token (no token is returned in this case).
    */
    String newToken = response.getDeserializedHeaders().getXMsContinuation();
    String effectiveNextToken;
    if (newToken != null && !newToken.isEmpty()) {
        effectiveNextToken = newToken;
    } else {
        if (failureCount.get() == 0 || (continueOnFailure == null || continueOnFailure)) {
            effectiveNextToken = newToken; // finished: null/empty token signals completion
        } else {
            effectiveNextToken = lastToken; // stopped on failures: resume point is the token just used
        }
    }
    // Report per-batch progress: this batch's failures plus both per-batch and aggregate counters.
    if (progressHandler != null) {
        AccessControlChanges changes = new AccessControlChanges();
        changes.setContinuationToken(effectiveNextToken);
        changes.setBatchFailures(
            response.getValue().getFailedEntries()
                .stream()
                .map(aclFailedEntry -> new AccessControlChangeFailure()
                    .setDirectory(aclFailedEntry.getType().equals("DIRECTORY"))
                    .setName(aclFailedEntry.getName())
                    .setErrorMessage(aclFailedEntry.getErrorMessage())
                ).collect(Collectors.toList())
        );
        changes.setBatchCounters(new AccessControlChangeCounters()
            .setChangedDirectoriesCount(response.getValue().getDirectoriesSuccessful())
            .setChangedFilesCount(response.getValue().getFilesSuccessful())
            .setFailedChangesCount(response.getValue().getFailureCount()));
        changes.setAggregateCounters(new AccessControlChangeCounters()
            .setChangedDirectoriesCount(directoriesSuccessfulCount.get())
            .setChangedFilesCount(filesSuccessfulCount.get())
            .setFailedChangesCount(failureCount.get()));
        progressHandler.accept(
            new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), changes,
                response.getDeserializedHeaders()));
    }
    /*
    Determine if we are finished either because there is no new continuation (failure or finished) token or we have
    hit maxBatches.
    */
    if ((newToken == null || newToken.isEmpty()) || (maxBatches != null && batchesCount.get() >= maxBatches)) {
        AccessControlChangeResult result = new AccessControlChangeResult()
            .setBatchFailures(batchFailures)
            .setContinuationToken(effectiveNextToken)
            .setCounters(new AccessControlChangeCounters()
                .setChangedDirectoriesCount(directoriesSuccessfulCount.get())
                .setChangedFilesCount(filesSuccessfulCount.get())
                .setFailedChangesCount(failureCount.get()));
        return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            result, response.getDeserializedHeaders());
    }
    // More batches remain: request the next one with the new token and recurse on its response.
    // NOTE(review): recursion depth grows with the number of batches; presumably batch counts stay
    // modest in practice — confirm for very large directory trees.
    try {
        Callable<ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse>> operation =
            () -> this.dataLakeStorage.getPaths().setAccessControlRecursiveWithResponse(mode, null, effectiveNextToken,
                continueOnFailure, batchSize, accessControlStr, null, context);
        ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response2 =
            StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
        return setAccessControlRecursiveWithResponseHelper(response2, maxBatches,
            directoriesSuccessfulCount, filesSuccessfulCount, failureCount, batchesCount, progressHandler,
            accessControlStr, mode, batchSize, continueOnFailure, effectiveNextToken, finalBatchFailures, timeout, context);
    } catch (Exception e) {
        // Wrap failures with the continuation token so callers can resume from the last position.
        if (e instanceof DataLakeStorageException) {
            throw LOGGER.logExceptionAsError(ModelHelper.changeAclRequestFailed((DataLakeStorageException) e,
                effectiveNextToken));
        } else {
            throw LOGGER.logExceptionAsError(ModelHelper.changeAclFailed(e, effectiveNextToken));
        }
    }
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
* <pre>
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.updateAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult updateAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    // Delegate to the WithResponse overload with default options, no timeout and an empty context.
    PathUpdateAccessControlRecursiveOptions options =
        new PathUpdateAccessControlRecursiveOptions(accessControlList);
    return updateAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathUpdateAccessControlRecursiveOptions options =
* new PathUpdateAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.updateAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathUpdateAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> updateAccessControlRecursiveWithResponse(
    PathUpdateAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the ACL once, then hand everything to the shared recursive implementation in
    // MODIFY mode.
    String serializedAcl = PathAccessControlEntry.serializeList(options.getAccessControlList());
    return setAccessControlRecursiveWithResponse(serializedAcl, options.getProgressHandler(),
        PathSetAccessControlRecursiveMode.MODIFY, options.getBatchSize(), options.getMaxBatches(),
        options.isContinueOnFailure(), options.getContinuationToken(), timeout, context);
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
* <pre>
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.removeAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult removeAccessControlRecursive(
    List<PathRemoveAccessControlEntry> accessControlList) {
    // Delegate to the WithResponse overload with default options, no timeout and an empty context.
    PathRemoveAccessControlRecursiveOptions options =
        new PathRemoveAccessControlRecursiveOptions(accessControlList);
    return removeAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathRemoveAccessControlRecursiveOptions options =
* new PathRemoveAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.removeAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathRemoveAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> removeAccessControlRecursiveWithResponse(
    PathRemoveAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the removal entries once, then hand everything to the shared recursive
    // implementation in REMOVE mode.
    String serializedAcl = PathRemoveAccessControlEntry.serializeList(options.getAccessControlList());
    return setAccessControlRecursiveWithResponse(serializedAcl, options.getProgressHandler(),
        PathSetAccessControlRecursiveMode.REMOVE, options.getBatchSize(), options.getMaxBatches(),
        options.isContinueOnFailure(), options.getContinuationToken(), timeout, context);
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
* <pre>
* PathAccessControl response = client.getAccessControl&
* System.out.printf&
* PathAccessControlEntry.serializeList&
* response.getOwner&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathAccessControl getAccessControl() {
    // Defaults: object IDs (not UPNs), no request conditions, no timeout, empty context.
    Response<PathAccessControl> response = getAccessControlWithResponse(false, null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* boolean userPrincipalNameReturned = false;
*
* Response<PathAccessControl> response = client.getAccessControlWithResponse&
* requestConditions, timeout, new Context&
*
* PathAccessControl pac = response.getValue&
*
* System.out.printf&
* PathAccessControlEntry.serializeList&
* pac.getPermissions&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param userPrincipalNameReturned When true, user identity values returned as User Principal Names. When false,
* user identity values returned as Azure Active Directory Object IDs. Default value is false.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathAccessControl> getAccessControlWithResponse(boolean userPrincipalNameReturned,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Normalize nullable inputs without reassigning parameters.
    DataLakeRequestConditions conditions =
        requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    Context finalContext = context == null ? Context.NONE : context;
    // Split the caller's conditions into the lease and modified-access condition groups the
    // generated layer expects.
    LeaseAccessConditions leaseConditions = new LeaseAccessConditions().setLeaseId(conditions.getLeaseId());
    ModifiedAccessConditions modifiedConditions = new ModifiedAccessConditions()
        .setIfMatch(conditions.getIfMatch())
        .setIfNoneMatch(conditions.getIfNoneMatch())
        .setIfModifiedSince(conditions.getIfModifiedSince())
        .setIfUnmodifiedSince(conditions.getIfUnmodifiedSince());
    // Issue a getProperties call in GET_ACCESS_CONTROL mode; the ACL comes back in the headers.
    Callable<ResponseBase<PathsGetPropertiesHeaders, Void>> getAclCall =
        () -> this.dataLakeStorage.getPaths().getPropertiesWithResponse(null, null,
            PathGetPropertiesAction.GET_ACCESS_CONTROL, userPrincipalNameReturned, leaseConditions,
            modifiedConditions, finalContext);
    ResponseBase<PathsGetPropertiesHeaders, Void> rawResponse = StorageImplUtils.sendRequest(getAclCall, timeout,
        DataLakeStorageException.class);
    // Parse the ACL, permissions, group and owner out of the response headers.
    PathsGetPropertiesHeaders headers = rawResponse.getDeserializedHeaders();
    PathAccessControl accessControl = new PathAccessControl(
        PathAccessControlEntry.parseList(headers.getXMsAcl()),
        PathPermissions.parseSymbolic(headers.getXMsPermissions()),
        headers.getXMsGroup(), headers.getXMsOwner());
    return new SimpleResponse<>(rawResponse, accessControl);
}
/**
 * Renames this path to the given destination by issuing a create-with-rename-source request
 * against the destination, propagating this client's SAS signature (if any) on the rename source.
 *
 * @param destinationFileSystem The destination file system, or {@code null} to stay in this one.
 * @param destinationPath The destination path.
 * @param sourceRequestConditions Conditions applied to the source, may be {@code null}.
 * @param destinationRequestConditions Conditions applied to the destination, may be {@code null}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response whose value is a client for the renamed path.
 */
Response<DataLakePathClient> renameWithResponseWithTimeout(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    destinationRequestConditions = destinationRequestConditions == null ? new DataLakeRequestConditions()
        : destinationRequestConditions;
    DataLakeRequestConditions finalSourceRequestConditions = sourceRequestConditions == null
        ? new DataLakeRequestConditions() : sourceRequestConditions;
    // Source conditions travel in their own header group on the create request.
    SourceModifiedAccessConditions srcConditions = new SourceModifiedAccessConditions()
        .setSourceIfModifiedSince(finalSourceRequestConditions.getIfModifiedSince())
        .setSourceIfUnmodifiedSince(finalSourceRequestConditions.getIfUnmodifiedSince())
        .setSourceIfMatch(finalSourceRequestConditions.getIfMatch())
        .setSourceIfNoneMatch(finalSourceRequestConditions.getIfNoneMatch());
    // Destination conditions are split into lease and modified-access groups.
    LeaseAccessConditions destinationLease = new LeaseAccessConditions()
        .setLeaseId(destinationRequestConditions.getLeaseId());
    ModifiedAccessConditions destinationModified = new ModifiedAccessConditions()
        .setIfMatch(destinationRequestConditions.getIfMatch())
        .setIfNoneMatch(destinationRequestConditions.getIfNoneMatch())
        .setIfModifiedSince(destinationRequestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(destinationRequestConditions.getIfUnmodifiedSince());
    DataLakePathClient destinationClient = getPathClient(destinationFileSystem, destinationPath);
    // The rename source is "/<filesystem>/<encoded path>", optionally suffixed with this client's
    // SAS signature (stripped of any leading '?') so the service can authorize reading the source.
    String sourcePath = "/" + this.getFileSystemName() + "/" + Utility.urlEncode(pathName);
    String sasSignature = null;
    AzureSasCredential sourceSas = this.getSasToken();
    if (sourceSas != null) {
        String rawSignature = sourceSas.getSignature();
        sasSignature = rawSignature.startsWith("?") ? rawSignature.substring(1) : rawSignature;
    }
    String finalRenameSource = sasSignature != null ? sourcePath + "?" + sasSignature : sourcePath;
    Callable<ResponseBase<PathsCreateHeaders, Void>> createCall = () ->
        destinationClient.dataLakeStorage.getPaths().createWithResponse(null /* request id */, null /* timeout */,
            null /* pathResourceType */, null /* continuation */, PathRenameMode.LEGACY, finalRenameSource,
            finalSourceRequestConditions.getLeaseId(), null /* properties */, null /* permissions */,
            null /* umask */, null /* owner */, null /* group */, null /* acl */, null /* proposedLeaseId */,
            null /* leaseDuration */, null /* expiryOptions */, null /* expiresOn */,
            null /* encryptionContext */, null /* pathHttpHeaders */, destinationLease, destinationModified,
            srcConditions, null /* cpkInfo */, finalContext);
    ResponseBase<PathsCreateHeaders, Void> createResponse = StorageImplUtils.sendRequest(createCall, timeout,
        DataLakeStorageException.class);
    return new SimpleResponse<>(createResponse, destinationClient);
}
/**
 * Builds a {@link DataLakePathClient} pointing at the given destination, defaulting the file
 * system to this client's file system when none is supplied.
 *
 * @param destinationFileSystem The destination file system, or {@code null} to reuse this client's
 * file system.
 * @param destinationPath The destination path.
 * @return A {@link DataLakePathClient} for the destination.
 * @throws IllegalArgumentException If {@code destinationPath} is null or empty.
 */
DataLakePathClient getPathClient(String destinationFileSystem, String destinationPath) {
    if (destinationFileSystem == null) {
        destinationFileSystem = getFileSystemName();
    }
    if (CoreUtils.isNullOrEmpty(destinationPath)) {
        // Message matches the isNullOrEmpty check above (it previously claimed only "null").
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'destinationPath' can not be set to null or empty"));
    }
    // Rebuild the underlying blob client against the destination, preserving this client's
    // pipeline, credentials and service version.
    return new DataLakePathClient(dataLakePathAsyncClient,
        ModelHelper.prepareBuilderReplacePath(destinationFileSystem, destinationPath, getFileSystemName(),
            getHttpPipeline(), getServiceVersion(), getPathUrl()).buildBlockBlobClient(), getHttpPipeline(),
        getAccountUrl(), serviceVersion, accountName, destinationFileSystem, destinationPath, pathResourceType,
        sasToken, customerProvidedKey, isTokenCredentialAuthenticated());
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
* <pre>
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties() {
    // Delegate with no request conditions, no timeout and an empty context.
    Response<PathProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties
* <pre>
* PathGetPropertiesOptions options = new PathGetPropertiesOptions&
*
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties(PathGetPropertiesOptions options) {
    // Delegate to the options-aware overload with no timeout and an empty context.
    Response<PathProperties> response = getPropertiesUsingOptionsWithResponse(options, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<PathProperties> response = client.getPropertiesWithResponse&
* new Context&
*
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> getPropertiesWithResponse(DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Fetch blob properties and translate them (and any blob exception) into their Data Lake
    // equivalents.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> blobResponse = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
        PathProperties pathProperties = Transforms.toPathProperties(blobResponse.getValue(), blobResponse);
        return new SimpleResponse<>(blobResponse, pathProperties);
    }, LOGGER);
}
/**
 * Returns the resource's metadata and properties.
 *
 * @param options {@link PathGetPropertiesOptions}, may be {@code null} for defaults.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
private Response<PathProperties> getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options, Duration timeout,
    Context context) {
    // The UPN-header supplier already tolerates null options; the request-condition lookup below
    // must do the same (previously it dereferenced options unconditionally and threw an NPE).
    context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);
    Context finalContext = context;
    PathGetPropertiesOptions finalOptions = options;
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(finalOptions == null ? null : finalOptions.getRequestConditions()),
            timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.exists -->
*
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegate with no timeout and an empty context.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Delegate the existence check to the blob client, converting blob exceptions to their Data
    // Lake equivalents.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.existsWithResponse(timeout, context), LOGGER);
}
/**
 * Gets the underlying {@link BlockBlobClient} this path client delegates blob-endpoint calls to.
 *
 * @return The wrapped block blob client.
 */
BlockBlobClient getBlockBlobClient() {
    return this.blockBlobClient;
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Delegate to the full overload using this client's account name and an empty context.
    String resolvedAccountName = getAccountName();
    return generateUserDelegationSas(dataLakeServiceSasSignatureValues, userDelegationKey, resolvedAccountName,
        Context.NONE);
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // A SAS over a directory differs from one over a file, so pass the resource type through.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasUtil.generateUserDelegationSas(userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues) {
    // Delegate to the context-aware overload with an empty context.
    return generateSas(dataLakeServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* &
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues, Context context) {
    // A SAS over a directory differs from one over a file, so pass the resource type through.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} | class DataLakePathClient {
private static final ClientLogger LOGGER = new ClientLogger(DataLakePathClient.class);
// Async counterpart backing this synchronous client.
final DataLakePathAsyncClient dataLakePathAsyncClient;
// Blob-endpoint client used for operations served by the blob service (properties, exists, ...).
final BlockBlobClient blockBlobClient;
// Generated REST client bound to this path on the dfs endpoint.
final AzureDataLakeStorageRestAPIImpl dataLakeStorage;
// Generated REST client bound to the file system (no path) on the dfs endpoint.
final AzureDataLakeStorageRestAPIImpl fileSystemDataLakeStorage;
// Generated REST client bound to this path on the blob endpoint.
final AzureDataLakeStorageRestAPIImpl blobDataLakeStorage;
private final String accountName;
private final String fileSystemName;
// Full path within the file system (may contain '/').
final String pathName;
// Last segment of pathName (the file/directory name itself).
private final String objectName;
private final DataLakeServiceVersion serviceVersion;
// Customer-provided encryption key, or null when not used.
private final CpkInfo customerProvidedKey;
// Whether this path is known to be a FILE or DIRECTORY (may be null when unknown).
final PathResourceType pathResourceType;
// SAS credential used to authorize requests, or null when another credential is in use.
private final AzureSasCredential sasToken;
private final boolean isTokenCredentialAuthenticated;
/**
 * Package-private constructor wiring the generated REST clients for both the dfs and blob
 * endpoints of the same resource.
 *
 * @param dataLakePathAsyncClient Async counterpart used by blocking bridge methods.
 * @param blockBlobClient Blob-endpoint client for blob-served operations.
 * @param pipeline The {@link HttpPipeline} shared by all generated clients.
 * @param url The dfs endpoint URL of the storage account.
 * @param serviceVersion Service version to send on every request.
 * @param accountName Storage account name.
 * @param fileSystemName File system (container) name.
 * @param pathName Full path of the resource within the file system.
 * @param pathResourceType Whether the resource is a file or a directory.
 * @param sasToken Optional SAS credential, may be null.
 * @param customerProvidedKey Optional customer-provided key info, may be null.
 * @param isTokenCredentialAuthenticated Whether the pipeline uses token (OAuth) auth.
 */
DataLakePathClient(DataLakePathAsyncClient dataLakePathAsyncClient, BlockBlobClient blockBlobClient,
    HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName,
    String fileSystemName, String pathName, PathResourceType pathResourceType, AzureSasCredential sasToken,
    CpkInfo customerProvidedKey, boolean isTokenCredentialAuthenticated) {
    this.dataLakePathAsyncClient = dataLakePathAsyncClient;
    this.blockBlobClient = blockBlobClient;
    this.accountName = accountName;
    this.fileSystemName = fileSystemName;
    this.pathName = pathName;
    this.pathResourceType = pathResourceType;
    this.sasToken = sasToken;
    // Path-scoped client on the dfs endpoint.
    this.dataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(url)
        .fileSystem(fileSystemName)
        .path(this.pathName)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.serviceVersion = serviceVersion;
    // Derive the blob endpoint ("blob") from the dfs endpoint ("dfs") for the same account.
    String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(url, "blob", "dfs");
    this.blobDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(blobUrl)
        .fileSystem(fileSystemName)
        .path(this.pathName)
        .version(serviceVersion.getVersion())
        .buildClient();
    // File-system-scoped client (no path) on the dfs endpoint.
    this.fileSystemDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
        .pipeline(pipeline)
        .url(url)
        .fileSystem(fileSystemName)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.customerProvidedKey = customerProvidedKey;
    this.isTokenCredentialAuthenticated = isTokenCredentialAuthenticated;
    // The object's own name is the final segment of the path.
    String[] pathParts = pathName.split("/");
    this.objectName = pathParts[pathParts.length - 1];
}
/**
 * Gets the URL of the storage account's dfs endpoint.
 *
 * @return the account URL.
 */
String getAccountUrl() {
    return this.dataLakeStorage.getUrl();
}
/**
 * Gets the full URL of the object represented by this client on the Data Lake service,
 * with the path segment URL-encoded.
 *
 * @return the URL of this path.
 */
String getPathUrl() {
    String encodedPath = Utility.urlEncode(this.pathName);
    return String.join("/", this.dataLakeStorage.getUrl(), this.fileSystemName, encodedPath);
}
/**
 * Gets the name of the storage account that owns this resource.
 *
 * @return the account name.
 */
public String getAccountName() {
    return this.accountName;
}
/**
 * Gets the name of the file system (container) that holds this object.
 *
 * @return the file system name.
 */
public String getFileSystemName() {
    return this.fileSystemName;
}
/**
 * Gets the full path of this object within its file system (directories plus the
 * resource name itself).
 *
 * @return the path of the object.
 */
String getObjectPath() {
    return this.pathName;
}
/**
 * Gets the name of this object only — the last segment of its path.
 *
 * @return the name of the object.
 */
String getObjectName() {
    return objectName;
}
/**
 * Gets the {@link HttpPipeline} that this client sends requests through.
 *
 * @return the pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.dataLakeStorage.getHttpPipeline();
}
/**
 * Gets the service version this client targets on every request.
 *
 * @return the {@link DataLakeServiceVersion} in use.
 */
public DataLakeServiceVersion getServiceVersion() {
    return this.serviceVersion;
}
// Internal accessor for the SAS credential supplied at construction; may be null when
// the client authenticates by other means.
AzureSasCredential getSasToken() {
    return sasToken;
}
/**
 * Gets the {@link CustomerProvidedKey} used to encrypt this path's content on the server.
 *
 * @return the customer provided key used for encryption, or {@code null} when no
 * customer-provided key is configured on this client.
 */
public CustomerProvidedKey getCustomerProvidedKey() {
    // Fix: customerProvidedKey is null when no CPK was configured (see
    // getCustomerProvidedKeyClient, which passes null through); the original
    // unconditionally dereferenced it and threw NullPointerException.
    if (customerProvidedKey == null) {
        return null;
    }
    return new CustomerProvidedKey(customerProvidedKey.getEncryptionKey());
}
// Internal accessor for the raw CPK info sent on requests; may be null when no
// customer-provided key is configured.
CpkInfo getCpkInfo() {
    return customerProvidedKey;
}
// True when the pipeline authenticates with a TokenCredential (OAuth); some service
// features (e.g. paginated recursive delete) are only enabled under token auth.
boolean isTokenCredentialAuthenticated() {
    return isTokenCredentialAuthenticated;
}
/**
 * Creates a new {@link DataLakePathClient} for the same resource that uses the specified
 * {@code customerProvidedKey} for server-side encryption.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the path; pass
 * {@code null} to use no customer provided key.
 * @return a {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
 */
public DataLakePathClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Convert the public CustomerProvidedKey into the wire-level CpkInfo; null stays null.
    CpkInfo convertedKey = null;
    if (customerProvidedKey != null) {
        convertedKey = new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    }
    // The wrapped block blob client needs the key in its own representation.
    BlockBlobClient cpkBlobClient =
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey));
    return new DataLakePathClient(dataLakePathAsyncClient, cpkBlobClient, getHttpPipeline(), getAccountUrl(),
        getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath(), this.pathResourceType,
        getSasToken(), convertedKey, isTokenCredentialAuthenticated());
}
/**
 * Creates a resource. By default this method will NOT overwrite an existing path.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a>.</p>
 *
 * @return Information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create() {
    // Delegate with overwrite disabled, matching the documented default.
    final boolean overwrite = false;
    return create(overwrite);
}
/**
 * Creates a resource, optionally overwriting any data already at the path.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a>.</p>
 *
 * @param overwrite Whether to overwrite, should data exist on the path.
 * @return Information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create(boolean overwrite) {
    // When overwrite is disallowed, an If-None-Match: * condition makes the service
    // fail the request if anything already exists at the path.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions().setRequestConditions(conditions);
    return createWithResponse(createOptions, null, Context.NONE).getValue();
}
/**
 * Creates a resource with the given permissions, headers, metadata and request conditions.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a>.</p>
 *
 * @param permissions POSIX access permissions for the resource owner, the resource owning group, and others.
 * @param umask Restricts permissions of the resource to be created.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createWithResponse(String permissions, String umask, PathHttpHeaders headers,
    Map<String, String> metadata, DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Bundle the individual parameters into the options form and delegate.
    DataLakePathCreateOptions createOptions = new DataLakePathCreateOptions()
        .setRequestConditions(requestConditions)
        .setMetadata(metadata)
        .setPathHttpHeaders(headers)
        .setUmask(umask)
        .setPermissions(permissions);
    return createWithResponse(createOptions, timeout, context);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Map<String, String> metadata = Collections.singletonMap&
* String permissions = "permissions";
* String umask = "umask";
* String owner = "rwx";
* String group = "r--";
* String leaseId = CoreUtils.randomUuid&
* Integer duration = 15;
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setOwner&
* .setGroup&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setMetadata&
* .setProposedLeaseId&
* .setLeaseDuration&
*
* Response<PathInfo> response = client.createWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathCreateOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
 * Creates a resource only if no path already exists at this location.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a>.</p>
 *
 * @return {@link PathInfo} that contains information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo createIfNotExists() {
    // Delegate with default options and no timeout/context.
    Response<PathInfo> response = createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null, null);
    return response.getValue();
}
/**
 * Creates a resource if a path does not exist.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a>.</p>
 *
 * @param options {@link DataLakePathCreateOptions}. Any request conditions already set on
 * the options are replaced with an If-None-Match: * condition.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} whose value is a {@link PathInfo} describing the resource. If the
 * status code is 201, a new resource was successfully created. If the status code is 409, a
 * resource already existed at this location and the value is {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createIfNotExistsWithResponse(DataLakePathCreateOptions options, Duration timeout,
    Context context) {
    try {
        options = options == null ? new DataLakePathCreateOptions() : options;
        // NOTE(review): this mutates the caller-supplied options object and overwrites any
        // request conditions the caller set — presumably intentional ("if not exists"
        // requires If-None-Match: *), but confirm callers do not rely on their conditions.
        options.setRequestConditions(new DataLakeRequestConditions()
            .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD));
        return createWithResponse(options, timeout, context);
    } catch (DataLakeStorageException e) {
        // 409 Conflict means the path already existed: surface the raw response with a
        // null value instead of throwing.
        if (e.getStatusCode() == 409) {
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), null);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Deletes the resource if it exists.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/delete">Azure
 * Docs</a>.</p>
 *
 * @return {@code true} if the resource is successfully deleted, {@code false} if the resource does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Delegate with default options and no timeout/context.
    Response<Boolean> response = deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, null);
    return response.getValue();
}
/**
 * Deletes the resource if it exists, returning the raw response.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/delete">Azure
 * Docs</a>.</p>
 *
 * @param options {@link DataLakePathDeleteOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers. If the status code is 200, the resource
 * was successfully deleted. If the status code is 404, the resource does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    DataLakePathDeleteOptions finalOptions = (options == null) ? new DataLakePathDeleteOptions() : options;
    try {
        Response<Void> deleteResponse = this.deleteWithResponse(finalOptions.getIsRecursive(),
            finalOptions.getRequestConditions(), timeout, context);
        return new SimpleResponse<>(deleteResponse.getRequest(), deleteResponse.getStatusCode(),
            deleteResponse.getHeaders(), true);
    } catch (DataLakeStorageException e) {
        // 404 means the resource never existed: report false rather than failing.
        if (e.getStatusCode() != 404) {
            throw LOGGER.logExceptionAsError(e);
        }
        HttpResponse errorResponse = e.getResponse();
        return new SimpleResponse<>(errorResponse.getRequest(), errorResponse.getStatusCode(),
            errorResponse.getHeaders(), false);
    }
}
/**
 * Package-private delete method for use by {@link DataLakeFileClient} and {@link DataLakeDirectoryClient}.
 *
 * @param recursive Whether to delete all paths beneath the directory.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} containing status code and HTTP headers.
 */
Response<Void> deleteWithResponse(Boolean recursive, DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    // Split the unified request conditions into the lease and modified-access forms the
    // generated client expects.
    LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
    ModifiedAccessConditions mac = new ModifiedAccessConditions()
        .setIfMatch(requestConditions.getIfMatch())
        .setIfNoneMatch(requestConditions.getIfNoneMatch())
        .setIfModifiedSince(requestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
    // Paginated recursive delete is only requested when the service version supports it
    // (2023-08-03+), the delete is recursive, and the client uses token (OAuth) auth;
    // otherwise the flag must be omitted entirely (null), not sent as false.
    Boolean paginated = (getServiceVersion().ordinal() >= DataLakeServiceVersion.V2023_08_03.ordinal()
        && Boolean.TRUE.equals(recursive)
        && isTokenCredentialAuthenticated()) ? true : null;
    Context finalContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<PathsDeleteHeaders, Void>> operation = () -> {
        // A paginated delete may span multiple service calls; keep issuing requests with
        // the returned continuation token until the service stops returning one.
        String continuation = null;
        ResponseBase<PathsDeleteHeaders, Void> lastResponse;
        do {
            lastResponse = this.dataLakeStorage.getPaths()
                .deleteWithResponse(null, null, recursive, continuation, paginated, lac, mac, finalContext);
            continuation = lastResponse.getHeaders().getValue(Transforms.X_MS_CONTINUATION);
        } while (continuation != null && !continuation.isEmpty());
        return lastResponse;
    };
    ResponseBase<PathsDeleteHeaders, Void> response = sendRequest(operation, timeout, DataLakeStorageException.class);
    return new SimpleResponse<>(response, null);
}
/**
 * Replaces this resource's metadata with the specified map. Existing metadata is not merged;
 * preserve old values by downloading them first and including them in the call.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a>.</p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate with no request conditions, timeout, or custom context.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Replaces this resource's metadata with the specified map, returning the raw response.
 * Existing metadata is not merged; preserve old values by downloading them first and
 * including them in the call.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a>.</p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Metadata is served by the blob endpoint; delegate to the wrapped blob client,
    // translating any blob exception into its Data Lake equivalent.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.setMetadataWithResponse(metadata,
            Transforms.toBlobRequestConditions(requestConditions), timeout, context),
        LOGGER);
}
/**
 * Replaces this resource's HTTP header properties. Setting only one header erases the rest;
 * pass existing values alongside the one being changed to preserve them.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a>.</p>
 *
 * @param headers {@link PathHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(PathHttpHeaders headers) {
    // Delegate with no request conditions, timeout, or custom context.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Replaces this resource's HTTP header properties, returning the raw response. Setting only
 * one header erases the rest; pass existing values alongside the one being changed to
 * preserve them.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a>.</p>
 *
 * @param headers {@link PathHttpHeaders}
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(PathHttpHeaders headers,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // HTTP headers are served by the blob endpoint; delegate to the wrapped blob client,
    // translating headers/conditions and any blob exception into Data Lake equivalents.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
            Transforms.toBlobRequestConditions(requestConditions), timeout, context),
        LOGGER);
}
/**
 * Changes the access control list, group and/or owner for a resource.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a>.</p>
 *
 * @param accessControlList A list of {@link PathAccessControlEntry} objects.
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @return The resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setAccessControlList(List<PathAccessControlEntry> accessControlList, String group, String owner) {
    // Delegate with no request conditions, timeout, or custom context.
    Response<PathInfo> response =
        setAccessControlListWithResponse(accessControlList, group, owner, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Changes the access control list, group and/or owner for a resource, returning the raw response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a>.</p>
 *
 * @param accessControlList A list of {@link PathAccessControlEntry} objects.
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setAccessControlListWithResponse(List<PathAccessControlEntry> accessControlList,
    String group, String owner, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Shared implementation handles both ACL-based and permission-based updates; pass
    // null permissions since this overload sets an explicit ACL.
    return setAccessControlWithResponse(accessControlList, null, group, owner, requestConditions, timeout,
        context);
}
/**
 * Shared implementation behind {@code setAccessControlListWithResponse} and
 * {@code setPermissionsWithResponse}: exactly one of {@code accessControlList} or
 * {@code permissions} is expected to be non-null.
 *
 * @param accessControlList POSIX ACL entries to set, or null when setting permissions instead.
 * @param permissions POSIX permissions to set, or null when setting an ACL instead.
 * @param group The group of the resource, or null to leave unchanged.
 * @param owner The owner of the resource, or null to leave unchanged.
 * @param requestConditions {@link DataLakeRequestConditions}, may be null.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info.
 */
Response<PathInfo> setAccessControlWithResponse(List<PathAccessControlEntry> accessControlList,
    PathPermissions permissions, String group, String owner, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    // Split the unified request conditions into the lease and modified-access forms the
    // generated client expects.
    LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
    ModifiedAccessConditions mac = new ModifiedAccessConditions()
        .setIfMatch(requestConditions.getIfMatch())
        .setIfNoneMatch(requestConditions.getIfNoneMatch())
        .setIfModifiedSince(requestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
    // Serialize whichever of the two mutually exclusive inputs was provided; the other
    // is sent as null so the service leaves it untouched.
    String permissionsString = permissions == null ? null : permissions.toString();
    String accessControlListString =
        accessControlList == null
            ? null
            : PathAccessControlEntry.serializeList(accessControlList);
    Context finalContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<PathsSetAccessControlHeaders, Void>> operation = () -> this.dataLakeStorage.getPaths()
        .setAccessControlWithResponse(null, owner, group, permissionsString, accessControlListString, null, lac,
            mac, finalContext);
    ResponseBase<PathsSetAccessControlHeaders, Void> response = sendRequest(operation, timeout,
        DataLakeStorageException.class);
    // Surface the updated ETag and last-modified time from the response headers.
    return new SimpleResponse<>(response, new PathInfo(response.getDeserializedHeaders().getETag(),
        response.getDeserializedHeaders().getLastModified()));
}
/**
 * Changes the permissions, group and/or owner for a resource.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a>.</p>
 *
 * @param permissions {@link PathPermissions}
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @return The resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setPermissions(PathPermissions permissions, String group, String owner) {
    // Delegate with no request conditions, timeout, or custom context.
    Response<PathInfo> response =
        setPermissionsWithResponse(permissions, group, owner, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Changes the permissions, group and/or owner for a resource, returning the raw response.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a>.</p>
 *
 * @param permissions {@link PathPermissions}
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setPermissionsWithResponse(PathPermissions permissions, String group, String owner,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Shared implementation handles both ACL-based and permission-based updates; pass
    // a null ACL since this overload sets POSIX permissions.
    return setAccessControlWithResponse(null, permissions, group, owner, requestConditions, timeout, context);
}
/**
 * Recursively sets the access control on a path and all subpaths.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a>.</p>
 *
 * @param accessControlList The POSIX access control list for the file or directory.
 * @return The result of the operation.
 *
 * @throws DataLakeAclChangeFailedException if a request to storage throws a
 * {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult setAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    // Delegate with default batching options and no timeout.
    PathSetAccessControlRecursiveOptions recursiveOptions =
        new PathSetAccessControlRecursiveOptions(accessControlList);
    return setAccessControlRecursiveWithResponse(recursiveOptions, null, Context.NONE).getValue();
}
/**
 * Recursively sets the access control on a path and all subpaths, with control over batch
 * size, maximum batches, continue-on-failure behavior, continuation token, and per-batch
 * progress reporting.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a>.</p>
 *
 * @param options {@link PathSetAccessControlRecursiveOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the result of the operation.
 *
 * @throws DataLakeAclChangeFailedException if a request to storage throws a
 * {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
    PathSetAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Sync-over-async bridge: the recursive batching loop lives on the async client
    // (SET mode, as opposed to MODIFY/REMOVE), and we block here with the optional timeout.
    Mono<Response<AccessControlChangeResult>> response =
        dataLakePathAsyncClient.setAccessControlRecursiveWithResponse(
            PathAccessControlEntry.serializeList(options.getAccessControlList()), options.getProgressHandler(),
            PathSetAccessControlRecursiveMode.SET, options.getBatchSize(), options.getMaxBatches(),
            options.isContinueOnFailure(), options.getContinuationToken(), context);
    return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
* <pre>
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.updateAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult updateAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    // Convenience overload: wrap the ACL in default options and unwrap the response value.
    PathUpdateAccessControlRecursiveOptions defaultOptions =
        new PathUpdateAccessControlRecursiveOptions(accessControlList);
    return updateAccessControlRecursiveWithResponse(defaultOptions, null, Context.NONE).getValue();
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathUpdateAccessControlRecursiveOptions options =
* new PathUpdateAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.updateAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathUpdateAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> updateAccessControlRecursiveWithResponse(
    PathUpdateAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Same plumbing as the SET variant, but MODIFY merges entries into the existing ACL.
    Mono<Response<AccessControlChangeResult>> operation = dataLakePathAsyncClient
        .setAccessControlRecursiveWithResponse(
            PathAccessControlEntry.serializeList(options.getAccessControlList()),
            options.getProgressHandler(), PathSetAccessControlRecursiveMode.MODIFY,
            options.getBatchSize(), options.getMaxBatches(), options.isContinueOnFailure(),
            options.getContinuationToken(), context);
    return StorageImplUtils.blockWithOptionalTimeout(operation, timeout);
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
* <pre>
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.removeAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult removeAccessControlRecursive(
    List<PathRemoveAccessControlEntry> accessControlList) {
    // Convenience overload: wrap the entries in default options and unwrap the response value.
    PathRemoveAccessControlRecursiveOptions defaultOptions =
        new PathRemoveAccessControlRecursiveOptions(accessControlList);
    return removeAccessControlRecursiveWithResponse(defaultOptions, null, Context.NONE).getValue();
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathRemoveAccessControlRecursiveOptions options =
* new PathRemoveAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.removeAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathRemoveAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> removeAccessControlRecursiveWithResponse(
    PathRemoveAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // REMOVE mode takes PathRemoveAccessControlEntry (entity + scope only, no permissions).
    Mono<Response<AccessControlChangeResult>> operation = dataLakePathAsyncClient
        .setAccessControlRecursiveWithResponse(
            PathRemoveAccessControlEntry.serializeList(options.getAccessControlList()),
            options.getProgressHandler(), PathSetAccessControlRecursiveMode.REMOVE,
            options.getBatchSize(), options.getMaxBatches(), options.isContinueOnFailure(),
            options.getContinuationToken(), context);
    return StorageImplUtils.blockWithOptionalTimeout(operation, timeout);
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
* <pre>
* PathAccessControl response = client.getAccessControl&
* System.out.printf&
* PathAccessControlEntry.serializeList&
* response.getOwner&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathAccessControl getAccessControl() {
    // Defaults: identities as AAD object IDs (no UPN), no request conditions, no timeout.
    Response<PathAccessControl> response = getAccessControlWithResponse(false, null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* boolean userPrincipalNameReturned = false;
*
* Response<PathAccessControl> response = client.getAccessControlWithResponse&
* requestConditions, timeout, new Context&
*
* PathAccessControl pac = response.getValue&
*
* System.out.printf&
* PathAccessControlEntry.serializeList&
* pac.getPermissions&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param userPrincipalNameReturned When true, user identity values returned as User Principal Names. When false,
* user identity values returned as Azure Active Directory Object IDs. Default value is false.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathAccessControl> getAccessControlWithResponse(boolean userPrincipalNameReturned,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Treat null conditions as "no conditions" so the splitting below never NPEs.
    requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    // The generated client takes lease and modified conditions as two separate objects.
    LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
    ModifiedAccessConditions mac = new ModifiedAccessConditions()
        .setIfMatch(requestConditions.getIfMatch())
        .setIfNoneMatch(requestConditions.getIfNoneMatch())
        .setIfModifiedSince(requestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
    Context finalContext = context == null ? Context.NONE : context;
    // GET_ACCESS_CONTROL returns the ACL purely via response headers; the body is Void.
    Callable<ResponseBase<PathsGetPropertiesHeaders, Void>> operation =
        () -> this.dataLakeStorage.getPaths().getPropertiesWithResponse(null, null,
            PathGetPropertiesAction.GET_ACCESS_CONTROL, userPrincipalNameReturned, lac, mac, finalContext);
    ResponseBase<PathsGetPropertiesHeaders, Void> response = sendRequest(operation, timeout,
        DataLakeStorageException.class);
    // Assemble the access-control model from the x-ms-acl / x-ms-permissions / group / owner headers.
    return new SimpleResponse<>(response, new PathAccessControl(
        PathAccessControlEntry.parseList(response.getDeserializedHeaders().getXMsAcl()),
        PathPermissions.parseSymbolic(response.getDeserializedHeaders().getXMsPermissions()),
        response.getDeserializedHeaders().getXMsGroup(), response.getDeserializedHeaders().getXMsOwner()));
}
// Renames this path to the given destination via the Path Create API in LEGACY rename mode.
// Package-private: callers supply pre-validated conditions; returns a client bound to the new path.
Response<DataLakePathClient> renameWithResponseWithTimeout(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    // Null conditions on either side mean "no conditions".
    destinationRequestConditions = destinationRequestConditions == null ? new DataLakeRequestConditions()
        : destinationRequestConditions;
    DataLakeRequestConditions finalSourceRequestConditions = sourceRequestConditions == null ? new DataLakeRequestConditions()
        : sourceRequestConditions;
    // Source conditions ride in x-ms-source-* headers; destination uses the normal lease/modified headers.
    SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions()
        .setSourceIfModifiedSince(finalSourceRequestConditions.getIfModifiedSince())
        .setSourceIfUnmodifiedSince(finalSourceRequestConditions.getIfUnmodifiedSince())
        .setSourceIfMatch(finalSourceRequestConditions.getIfMatch())
        .setSourceIfNoneMatch(finalSourceRequestConditions.getIfNoneMatch());
    LeaseAccessConditions destLac = new LeaseAccessConditions()
        .setLeaseId(destinationRequestConditions.getLeaseId());
    ModifiedAccessConditions destMac = new ModifiedAccessConditions()
        .setIfMatch(destinationRequestConditions.getIfMatch())
        .setIfNoneMatch(destinationRequestConditions.getIfNoneMatch())
        .setIfModifiedSince(destinationRequestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(destinationRequestConditions.getIfUnmodifiedSince());
    DataLakePathClient dataLakePathClient = getPathClient(destinationFileSystem, destinationPath);
    // x-ms-rename-source is "/<filesystem>/<encoded path>"; if this client authenticates with a SAS,
    // the SAS query string must be appended so the service can read the source.
    String renameSource = "/" + this.getFileSystemName() + "/" + Utility.urlEncode(pathName);
    String signature = null;
    if (this.getSasToken() != null) {
        // Normalize: stored signatures may or may not carry a leading '?'.
        if (this.getSasToken().getSignature().startsWith("?")) {
            signature = this.getSasToken().getSignature().substring(1);
        } else {
            signature = this.getSasToken().getSignature();
        }
    }
    String finalRenameSource = signature != null ? renameSource + "?" + signature : renameSource;
    // Create-with-rename against the DESTINATION client; most params are unused for renames.
    Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
        dataLakePathClient.dataLakeStorage.getPaths().createWithResponse(null /* request id */, null /* timeout */,
            null /* pathResourceType */, null /* continuation */, PathRenameMode.LEGACY, finalRenameSource,
            finalSourceRequestConditions.getLeaseId(), null /* properties */, null /* permissions */,
            null /* umask */, null /* owner */, null /* group */, null /* acl */, null /* proposedLeaseId */,
            null /* leaseDuration */, null /* expiryOptions */, null /* expiresOn */,
            null /* encryptionContext */, null /* pathHttpHeaders */, destLac, destMac, sourceConditions,
            null /* cpkInfo */, finalContext);
    ResponseBase<PathsCreateHeaders, Void> response = sendRequest(operation, timeout,
        DataLakeStorageException.class);
    return new SimpleResponse<>(response, dataLakePathClient);
}
/**
* Takes in a destination and creates a DataLakePathClient with a new path
* @param destinationFileSystem The destination file system
* @param destinationPath The destination path
* @return A DataLakePathClient
*/
DataLakePathClient getPathClient(String destinationFileSystem, String destinationPath) {
    // A null destination file system means "same file system as this client".
    if (destinationFileSystem == null) {
        destinationFileSystem = getFileSystemName();
    }
    // Guard rejects both null and empty; the original message only mentioned null, which was
    // misleading when callers passed "".
    if (CoreUtils.isNullOrEmpty(destinationPath)) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'destinationPath' can not be set to null or empty"));
    }
    // Rebuild the client around the new path, carrying over pipeline, auth, CPK and resource type.
    return new DataLakePathClient(dataLakePathAsyncClient,
        dataLakePathAsyncClient.prepareBuilderReplacePath(destinationFileSystem, destinationPath).buildBlockBlobClient(),
        getHttpPipeline(), getAccountUrl(), serviceVersion, accountName, destinationFileSystem, destinationPath,
        pathResourceType, sasToken, customerProvidedKey, isTokenCredentialAuthenticated());
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
* <pre>
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties() {
    // No request conditions, no timeout, default context.
    Response<PathProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties
* <pre>
* PathGetPropertiesOptions options = new PathGetPropertiesOptions&
*
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties(PathGetPropertiesOptions options) {
    // Options-aware variant; no timeout, default context.
    return getPropertiesUsingOptionsWithResponse(options, null, Context.NONE)
        .getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<PathProperties> response = client.getPropertiesWithResponse&
* new Context&
*
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> getPropertiesWithResponse(DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Serve the call through the blob endpoint, translating conditions, properties and
    // any thrown blob exception to their DataLake equivalents.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> blobResponse = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
        PathProperties pathProperties = Transforms.toPathProperties(blobResponse.getValue(), blobResponse);
        return new SimpleResponse<>(blobResponse, pathProperties);
    }, LOGGER);
}
/**
* Returns the resource's metadata and properties.
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Response<PathProperties> getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options, Duration timeout,
    Context context) {
    // The UPN-header supplier already tolerates null options; the request-conditions lookup below
    // must do the same — the original dereferenced 'finalOptions' unconditionally and threw
    // NullPointerException when options was null.
    context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);
    Context finalContext = context;
    PathGetPropertiesOptions finalOptions = options;
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(
                finalOptions == null ? null : finalOptions.getRequestConditions()),
            timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.exists -->
*
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Existence check with no timeout and default context.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Delegate to the blob client; blob exceptions are converted to DataLake exceptions.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.existsWithResponse(timeout, context), LOGGER);
}
// Package-private accessor for the underlying blob client backing blob-endpoint operations.
BlockBlobClient getBlockBlobClient() {
    return blockBlobClient;
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Delegate to the full overload with this client's account name and default context.
    return generateUserDelegationSas(
        dataLakeServiceSasSignatureValues, userDelegationKey, getAccountName(), Context.NONE);
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Directory-ness affects how the SAS resource is signed.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasImplUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasImplUtil.generateUserDelegationSas(userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues) {
    // Delegate to the context-aware overload with the default context.
    return generateSas(dataLakeServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* &
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues, Context context) {
    // Service SAS is signed with the shared key credential extracted from the pipeline.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasImplUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasImplUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} |
Can try to address this in the follow-up PR | public Response<PathInfo> createWithResponse(DataLakePathCreateOptions options, Duration timeout, Context context) {
DataLakePathCreateOptions finalOptions = options == null ? new DataLakePathCreateOptions() : options;
DataLakeRequestConditions requestConditions = options.getRequestConditions() == null ? new DataLakeRequestConditions() : options.getRequestConditions();
LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
ModifiedAccessConditions mac = new ModifiedAccessConditions()
.setIfMatch(requestConditions.getIfMatch())
.setIfNoneMatch(requestConditions.getIfNoneMatch())
.setIfModifiedSince(requestConditions.getIfModifiedSince())
.setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
String acl = options.getAccessControlList() != null ? PathAccessControlEntry
.serializeList(options.getAccessControlList()) : null;
PathExpiryOptions expiryOptions = ModelHelper.setFieldsIfNull(options, pathResourceType);
String expiresOnString = null;
if (options.getScheduleDeletionOptions() != null && options.getScheduleDeletionOptions().getExpiresOn() != null) {
expiresOnString = DateTimeRfc1123.toRfc1123String(options.getScheduleDeletionOptions().getExpiresOn());
} else if (options.getScheduleDeletionOptions() != null && options.getScheduleDeletionOptions().getTimeToExpire() != null) {
expiresOnString = Long.toString(options.getScheduleDeletionOptions().getTimeToExpire().toMillis());
}
String finalExpiresOnString = expiresOnString;
Long leaseDuration = options.getLeaseDuration() != null ? Long.valueOf(options.getLeaseDuration()) : null;
Context finalContext = context == null ? Context.NONE : context;
Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
this.dataLakeStorage.getPaths().createWithResponse(null, null, pathResourceType, null, null, null,
finalOptions.getSourceLeaseId(), ModelHelper.buildMetadataString(finalOptions.getMetadata()),
finalOptions.getPermissions(), finalOptions.getUmask(), options.getOwner(), finalOptions.getGroup(),
acl, finalOptions.getProposedLeaseId(), leaseDuration, expiryOptions, finalExpiresOnString,
finalOptions.getEncryptionContext(), finalOptions.getPathHttpHeaders(), lac, mac, null,
customerProvidedKey, finalContext);
ResponseBase<PathsCreateHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout,
DataLakeStorageException.class);
return new SimpleResponse<>(response, new PathInfo(response.getDeserializedHeaders().getETag(),
response.getDeserializedHeaders().getLastModified(),
response.getDeserializedHeaders().isXMsRequestServerEncrypted() != null,
response.getDeserializedHeaders().getXMsEncryptionKeySha256()));
} | LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId()); | public Response<PathInfo> createWithResponse(DataLakePathCreateOptions options, Duration timeout, Context context) {
DataLakePathCreateOptions finalOptions = options == null ? new DataLakePathCreateOptions() : options;
DataLakeRequestConditions requestConditions = finalOptions.getRequestConditions() == null
? new DataLakeRequestConditions() : finalOptions.getRequestConditions();
LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
ModifiedAccessConditions mac = new ModifiedAccessConditions()
.setIfMatch(requestConditions.getIfMatch())
.setIfNoneMatch(requestConditions.getIfNoneMatch())
.setIfModifiedSince(requestConditions.getIfModifiedSince())
.setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
String acl = finalOptions.getAccessControlList() != null ? PathAccessControlEntry
.serializeList(finalOptions.getAccessControlList()) : null;
PathExpiryOptions expiryOptions = ModelHelper.setFieldsIfNull(finalOptions, pathResourceType);
String expiresOnString = null;
if (finalOptions.getScheduleDeletionOptions() != null && finalOptions.getScheduleDeletionOptions().getExpiresOn() != null) {
expiresOnString = DateTimeRfc1123.toRfc1123String(finalOptions.getScheduleDeletionOptions().getExpiresOn());
} else if (finalOptions.getScheduleDeletionOptions() != null && finalOptions.getScheduleDeletionOptions().getTimeToExpire() != null) {
expiresOnString = Long.toString(finalOptions.getScheduleDeletionOptions().getTimeToExpire().toMillis());
}
String finalExpiresOnString = expiresOnString;
Long leaseDuration = finalOptions.getLeaseDuration() != null ? Long.valueOf(finalOptions.getLeaseDuration()) : null;
Context finalContext = context == null ? Context.NONE : context;
Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
this.dataLakeStorage.getPaths().createWithResponse(null, null, pathResourceType, null, null, null,
finalOptions.getSourceLeaseId(), ModelHelper.buildMetadataString(finalOptions.getMetadata()),
finalOptions.getPermissions(), finalOptions.getUmask(), finalOptions.getOwner(),
finalOptions.getGroup(), acl, finalOptions.getProposedLeaseId(), leaseDuration, expiryOptions,
finalExpiresOnString, finalOptions.getEncryptionContext(), finalOptions.getPathHttpHeaders(), lac, mac,
null, customerProvidedKey, finalContext);
ResponseBase<PathsCreateHeaders, Void> response = sendRequest(operation, timeout,
DataLakeStorageException.class);
return new SimpleResponse<>(response, new PathInfo(
response.getDeserializedHeaders().getETag(),
response.getDeserializedHeaders().getLastModified(),
response.getDeserializedHeaders().isXMsRequestServerEncrypted() != null,
response.getDeserializedHeaders().getXMsEncryptionKeySha256()));
} | class DataLakePathClient {
// Logger used to record and rethrow exceptions surfaced by this client.
private static final ClientLogger LOGGER = new ClientLogger(DataLakePathClient.class);
// Async counterpart; some operations delegate to it.
final DataLakePathAsyncClient dataLakePathAsyncClient;
// Blob-endpoint client used for operations served by the blob service (metadata, HTTP headers, ...).
final BlockBlobClient blockBlobClient;
// Generated REST client targeting the DFS endpoint for this path.
final AzureDataLakeStorageRestAPIImpl dataLakeStorage;
// Generated REST client scoped to the file system (no path component).
final AzureDataLakeStorageRestAPIImpl fileSystemDataLakeStorage;
// Generated REST client targeting the blob endpoint for this path.
final AzureDataLakeStorageRestAPIImpl blobDataLakeStorage;
// Storage account name associated with this resource.
private final String accountName;
// Name of the file system (container) holding this path.
private final String fileSystemName;
// Full path of the resource within the file system.
final String pathName;
// Service version negotiated for all requests from this client.
private final DataLakeServiceVersion serviceVersion;
// Customer-provided encryption key info; may be unset when no CPK is configured.
private final CpkInfo customerProvidedKey;
// Whether this path is a file or a directory.
final PathResourceType pathResourceType;
// SAS credential, when SAS authentication is in use.
private final AzureSasCredential sasToken;
// True when the client authenticates with a token credential (enables paginated recursive delete).
private final boolean isTokenCredentialAuthenticated;
/**
 * Package-private constructor wiring up the REST clients for the DFS and blob endpoints of this path.
 *
 * @param dataLakePathAsyncClient the async counterpart of this client.
 * @param blockBlobClient blob client used for blob-endpoint operations.
 * @param pipeline the {@link HttpPipeline} all requests are sent through.
 * @param url the DFS endpoint URL.
 * @param serviceVersion the service version to target.
 * @param accountName the storage account name.
 * @param fileSystemName the file system (container) name.
 * @param pathName the full path of the resource.
 * @param pathResourceType whether the path is a file or a directory.
 * @param sasToken SAS credential, if SAS authentication is used.
 * @param customerProvidedKey customer-provided key info, if configured.
 * @param isTokenCredentialAuthenticated whether a token credential authenticates this client.
 */
DataLakePathClient(DataLakePathAsyncClient dataLakePathAsyncClient, BlockBlobClient blockBlobClient,
HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName,
String fileSystemName, String pathName, PathResourceType pathResourceType, AzureSasCredential sasToken,
CpkInfo customerProvidedKey, boolean isTokenCredentialAuthenticated) {
this.dataLakePathAsyncClient = dataLakePathAsyncClient;
this.blockBlobClient = blockBlobClient;
this.accountName = accountName;
this.fileSystemName = fileSystemName;
this.pathName = pathName;
this.pathResourceType = pathResourceType;
this.sasToken = sasToken;
// REST client for the DFS endpoint, scoped to this file system and path.
this.dataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
.pipeline(pipeline)
.url(url)
.fileSystem(fileSystemName)
.path(this.pathName)
.version(serviceVersion.getVersion())
.buildClient();
this.serviceVersion = serviceVersion;
// Derive the blob endpoint from the DFS endpoint (swap "dfs" for "blob" in the host).
String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(url, "blob", "dfs");
this.blobDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
.pipeline(pipeline)
.url(blobUrl)
.fileSystem(fileSystemName)
.path(this.pathName)
.version(serviceVersion.getVersion())
.buildClient();
// REST client scoped to the file system only (no path component).
this.fileSystemDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
.pipeline(pipeline)
.url(url)
.fileSystem(fileSystemName)
.version(serviceVersion.getVersion())
.buildClient();
this.customerProvidedKey = customerProvidedKey;
this.isTokenCredentialAuthenticated = isTokenCredentialAuthenticated;
}
/**
 * Gets the URL of the storage account.
 *
 * @return the account URL.
 */
String getAccountUrl() {
    return this.dataLakeStorage.getUrl();
}
/**
 * Gets the URL of the object represented by this client on the Data Lake service.
 *
 * @return the URL of this path.
 */
String getPathUrl() {
    String encodedPath = Utility.urlEncode(pathName);
    return this.dataLakeStorage.getUrl() + "/" + fileSystemName + "/" + encodedPath;
}
/**
 * Gets the associated account name.
 *
 * @return Account name associated with this storage resource.
 */
public String getAccountName() {
    return this.accountName;
}
/**
 * Gets the name of the File System in which this object lives.
 *
 * @return The name of the File System.
 */
public String getFileSystemName() {
    return this.fileSystemName;
}
/**
 * Gets the full path of this object within its file system.
 *
 * @return The path of the object.
 */
String getObjectPath() {
    return this.pathName;
}
/**
 * Gets the name of this object, not including its full path (i.e. the final path segment).
 *
 * @return The name of the object, or the empty string when the path contains only separators
 * (e.g. the root path {@code "/"}, whose split result is an empty array).
 */
String getObjectName() {
    String[] pathParts = getObjectPath().split("/");
    // String.split drops trailing empty strings, so "/" produces an empty array;
    // guard against indexing pathParts[-1] in that case.
    return pathParts.length == 0 ? "" : pathParts[pathParts.length - 1];
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.dataLakeStorage.getHttpPipeline();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public DataLakeServiceVersion getServiceVersion() {
    return this.serviceVersion;
}
/**
 * Gets the SAS credential used by this client, if one was configured.
 *
 * @return the SAS credential.
 */
AzureSasCredential getSasToken() {
    return sasToken;
}
/**
 * Gets the {@link CustomerProvidedKey} used to encrypt this path's content on the server.
 *
 * @return the customer provided key used for encryption, or {@code null} if none is configured.
 */
public CustomerProvidedKey getCustomerProvidedKey() {
    // Without this guard the method throws NullPointerException whenever no CPK was configured.
    if (customerProvidedKey == null) {
        return null;
    }
    return new CustomerProvidedKey(customerProvidedKey.getEncryptionKey());
}
/**
 * Gets the raw {@link CpkInfo} configured on this client, if any.
 *
 * @return the customer-provided key info.
 */
CpkInfo getCpkInfo() {
    return customerProvidedKey;
}
/**
 * Indicates whether this client authenticates with a token credential.
 *
 * @return true when a token credential is in use.
 */
boolean isTokenCredentialAuthenticated() {
    return isTokenCredentialAuthenticated;
}
/**
 * Creates a new {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the path,
 * pass {@code null} to use no customer provided key.
 * @return a {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
 */
public DataLakePathClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Translate the public CPK model into the internal CpkInfo; null clears the key.
    CpkInfo cpkInfo = customerProvidedKey == null
        ? null
        : new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    BlockBlobClient cpkBlobClient =
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey));
    return new DataLakePathClient(dataLakePathAsyncClient, cpkBlobClient, getHttpPipeline(), getAccountUrl(),
        getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath(), this.pathResourceType,
        getSasToken(), cpkInfo, isTokenCredentialAuthenticated());
}
/**
 * Creates a resource. By default, this method will not overwrite an existing path.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a></p>
 *
 * @return Information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create() {
    return this.create(false);
}
/**
 * Creates a resource.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a></p>
 *
 * @param overwrite Whether to overwrite, should data exist on the path.
 *
 * @return Information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create(boolean overwrite) {
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // If-None-Match: * makes the service reject the request when the path already exists.
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    DataLakePathCreateOptions options = new DataLakePathCreateOptions().setRequestConditions(conditions);
    return createWithResponse(options, null, Context.NONE).getValue();
}
/**
 * Creates a resource.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a></p>
 *
 * @param permissions POSIX access permissions for the resource owner, the resource owning group, and others.
 * @param umask Restricts permissions of the resource to be created.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing information about the created resource
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createWithResponse(String permissions, String umask, PathHttpHeaders headers,
    Map<String, String> metadata, DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Bundle the individual arguments into the options bag and delegate.
    DataLakePathCreateOptions options = new DataLakePathCreateOptions()
        .setRequestConditions(requestConditions)
        .setMetadata(metadata)
        .setPathHttpHeaders(headers)
        .setUmask(umask)
        .setPermissions(permissions);
    return createWithResponse(options, timeout, context);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Map<String, String> metadata = Collections.singletonMap&
* String permissions = "permissions";
* String umask = "umask";
* String owner = "rwx";
* String group = "r--";
* String leaseId = CoreUtils.randomUuid&
* Integer duration = 15;
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setOwner&
* .setGroup&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setMetadata&
* .setProposedLeaseId&
* .setLeaseDuration&
*
* Response<PathInfo> response = client.createWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathCreateOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
 * Creates a resource if a path does not exist.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a></p>
 *
 * @return {@link PathInfo} that contains information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo createIfNotExists() {
    DataLakePathCreateOptions defaultOptions = new DataLakePathCreateOptions();
    return createIfNotExistsWithResponse(defaultOptions, null, null).getValue();
}
/**
 * Creates a resource if a path does not exist.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/create">Azure
 * Docs</a></p>
 *
 * @param options {@link DataLakePathCreateOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} whose {@link Response#getValue() value} contains a {@link PathInfo} describing
 * the resource. If the status code is 201, a new resource was successfully created. If the status code is 409,
 * a resource already existed at this location and the value is {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createIfNotExistsWithResponse(DataLakePathCreateOptions options, Duration timeout,
    Context context) {
    try {
        return createWithResponse(options, timeout, context);
    } catch (DataLakeStorageException e) {
        // Compare constant-first: e.getErrorCode() may be null (e.g. a 409 without an error-code
        // header), and the original e.getErrorCode().equals(...) would throw NullPointerException.
        if (e.getStatusCode() == 409
            && BlobErrorCode.RESOURCE_ALREADY_EXISTS.toString().equals(e.getErrorCode())) {
            // Path already exists: surface the 409 response with a null value instead of throwing.
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), null);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Deletes paths under the resource if it exists.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/delete">Azure
 * Docs</a></p>
 *
 * @return {@code true} if the resource is successfully deleted, {@code false} if resource does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    DataLakePathDeleteOptions defaultOptions = new DataLakePathDeleteOptions();
    Response<Boolean> response = deleteIfExistsWithResponse(defaultOptions, null, null);
    return response.getValue();
}
/**
 * Deletes all paths under the specified resource if it exists.
 *
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/delete">Azure
 * Docs</a></p>
 *
 * @param options {@link DataLakePathDeleteOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the
 * resource was successfully deleted. If status code is 404, the resource does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    options = options == null ? new DataLakePathDeleteOptions() : options;
    try {
        Response<Void> response = this.deleteWithResponse(options.getIsRecursive(), options.getRequestConditions(),
            timeout, context);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), true);
    } catch (DataLakeStorageException e) {
        // Compare constant-first: e.getErrorCode() may be null, and the original
        // e.getErrorCode().equals(...) would throw NullPointerException instead of rethrowing cleanly.
        if (e.getStatusCode() == 404
            && BlobErrorCode.RESOURCE_NOT_FOUND.toString().equals(e.getErrorCode())) {
            // Path did not exist: report false rather than propagating the 404.
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    }
}
/**
 * Package-private delete method for use by {@link DataLakeFileClient} and {@link DataLakeDirectoryClient}.
 *
 * @param recursive Whether to delete all paths beneath the directory.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} containing status code and HTTP headers.
 */
Response<Void> deleteWithResponse(Boolean recursive, DataLakeRequestConditions requestConditions, Duration timeout,
Context context) {
requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
ModifiedAccessConditions mac = new ModifiedAccessConditions()
.setIfMatch(requestConditions.getIfMatch())
.setIfNoneMatch(requestConditions.getIfNoneMatch())
.setIfModifiedSince(requestConditions.getIfModifiedSince())
.setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
// Paginated recursive delete is only requested for service versions >= 2023-08-03, recursive
// deletes, and token-credential auth; otherwise the parameter is omitted (null).
Boolean paginated = (getServiceVersion().ordinal() >= DataLakeServiceVersion.V2023_08_03.ordinal()
&& Boolean.TRUE.equals(recursive)
&& isTokenCredentialAuthenticated()) ? true : null;
Context finalContext = context == null ? Context.NONE : context;
// Keep issuing delete requests while the service returns a continuation token; the last
// response is the one surfaced to the caller.
Callable<ResponseBase<PathsDeleteHeaders, Void>> operation = () -> {
String continuation = null;
ResponseBase<PathsDeleteHeaders, Void> lastResponse;
do {
lastResponse = this.dataLakeStorage.getPaths()
.deleteWithResponse(null, null, recursive, continuation, paginated, lac, mac, finalContext);
continuation = lastResponse.getHeaders().getValue(Transforms.X_MS_CONTINUATION);
} while (continuation != null && !continuation.isEmpty());
return lastResponse;
};
ResponseBase<PathsDeleteHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
return new SimpleResponse<>(response, null);
}
/**
 * Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
 * values must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
 * values must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // This operation is served by the blob endpoint; convert any blob exception into its
    // Data Lake equivalent via returnOrConvertException.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        return blockBlobClient.setMetadataWithResponse(metadata,
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
    }, LOGGER);
}
/**
 * Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
 * In order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link PathHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(PathHttpHeaders headers) {
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
 * In order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link PathHttpHeaders}
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(PathHttpHeaders headers,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // This operation is served by the blob endpoint; convert any blob exception into its
    // Data Lake equivalent via returnOrConvertException.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        return blockBlobClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
    }, LOGGER);
}
/**
 * Changes the access control list, group and/or owner for a resource.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure Docs</a></p>
 *
 * @param accessControlList A list of {@link PathAccessControlEntry} objects.
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @return The resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setAccessControlList(List<PathAccessControlEntry> accessControlList, String group, String owner) {
    Response<PathInfo> response =
        setAccessControlListWithResponse(accessControlList, group, owner, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Changes the access control list, group and/or owner for a resource.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure Docs</a></p>
 *
 * @param accessControlList A list of {@link PathAccessControlEntry} objects.
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setAccessControlListWithResponse(List<PathAccessControlEntry> accessControlList,
    String group, String owner, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the shared implementation, passing no PathPermissions.
    return setAccessControlWithResponse(accessControlList, null, group, owner, requestConditions, timeout,
        context);
}
/**
 * Shared implementation for setting POSIX access control: exactly one of {@code accessControlList} or
 * {@code permissions} is expected to be non-null, together with optional group/owner updates.
 *
 * @param accessControlList POSIX ACL entries to apply, or null.
 * @param permissions POSIX permissions to apply, or null.
 * @param group The group of the resource, or null to leave unchanged.
 * @param owner The owner of the resource, or null to leave unchanged.
 * @param requestConditions {@link DataLakeRequestConditions}; null means no conditions.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info.
 */
Response<PathInfo> setAccessControlWithResponse(List<PathAccessControlEntry> accessControlList,
    PathPermissions permissions, String group, String owner, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    DataLakeRequestConditions conditions =
        requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    LeaseAccessConditions leaseConditions = new LeaseAccessConditions().setLeaseId(conditions.getLeaseId());
    ModifiedAccessConditions modifiedConditions = new ModifiedAccessConditions()
        .setIfMatch(conditions.getIfMatch())
        .setIfNoneMatch(conditions.getIfNoneMatch())
        .setIfModifiedSince(conditions.getIfModifiedSince())
        .setIfUnmodifiedSince(conditions.getIfUnmodifiedSince());
    final String permissionsString = permissions == null ? null : permissions.toString();
    final String aclString =
        accessControlList == null ? null : PathAccessControlEntry.serializeList(accessControlList);
    Context finalContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<PathsSetAccessControlHeaders, Void>> operation = () -> this.dataLakeStorage.getPaths()
        .setAccessControlWithResponse(null, owner, group, permissionsString, aclString, null, leaseConditions,
            modifiedConditions, finalContext);
    ResponseBase<PathsSetAccessControlHeaders, Void> response =
        StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
    PathsSetAccessControlHeaders deserialized = response.getDeserializedHeaders();
    return new SimpleResponse<>(response, new PathInfo(deserialized.getETag(), deserialized.getLastModified()));
}
/**
 * Changes the permissions, group and/or owner for a resource.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure Docs</a></p>
 *
 * @param permissions {@link PathPermissions}
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @return The resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setPermissions(PathPermissions permissions, String group, String owner) {
    Response<PathInfo> response = setPermissionsWithResponse(permissions, group, owner, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Changes the permissions, group and/or owner for a resource.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure Docs</a></p>
 *
 * @param permissions {@link PathPermissions}
 * @param group The group of the resource.
 * @param owner The owner of the resource.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource info.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setPermissionsWithResponse(PathPermissions permissions, String group, String owner,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the shared implementation, passing no explicit ACL entries.
    return setAccessControlWithResponse(null, permissions, group, owner, requestConditions, timeout, context);
}
/**
 * Recursively sets the access control on a path and all subpaths.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure Docs</a></p>
 *
 * @param accessControlList The POSIX access control list for the file or directory.
 * @return The result of the operation.
 *
 * @throws DataLakeAclChangeFailedException if a request to storage throws a
 * {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult setAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    PathSetAccessControlRecursiveOptions options = new PathSetAccessControlRecursiveOptions(accessControlList);
    return setAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
 * Recursively sets the access control on a path and all subpaths.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure Docs</a></p>
 *
 * @param options {@link PathSetAccessControlRecursiveOptions}; must not be null.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the result of the operation.
 *
 * @throws NullPointerException if {@code options} is null.
 * @throws DataLakeAclChangeFailedException if a request to storage throws a
 * {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
    PathSetAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Validate up front (matches the assertNotNull style used by the internal overload) instead of
    // failing with an anonymous NullPointerException on options.getAccessControlList().
    StorageImplUtils.assertNotNull("options", options);
    return setAccessControlRecursiveWithResponse(
        PathAccessControlEntry.serializeList(options.getAccessControlList()), options.getProgressHandler(),
        PathSetAccessControlRecursiveMode.SET, options.getBatchSize(), options.getMaxBatches(),
        options.isContinueOnFailure(), options.getContinuationToken(), timeout, context);
}
Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
String accessControlList, Consumer<Response<AccessControlChanges>> progressHandler,
PathSetAccessControlRecursiveMode mode, Integer batchSize, Integer maxBatches, Boolean continueOnFailure,
String continuationToken, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("accessControlList", accessControlList);
context = context == null ? Context.NONE : context;
Context contextFinal = context;
AtomicInteger directoriesSuccessfulCount = new AtomicInteger(0);
AtomicInteger filesSuccessfulCount = new AtomicInteger(0);
AtomicInteger failureCount = new AtomicInteger(0);
AtomicInteger batchesCount = new AtomicInteger(0);
try {
Callable<ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse>> operation = () ->
this.dataLakeStorage.getPaths().setAccessControlRecursiveWithResponse(mode, null,
continuationToken, continueOnFailure, batchSize, accessControlList, null, contextFinal);
ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response =
StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
return setAccessControlRecursiveWithResponseHelper(response, maxBatches, directoriesSuccessfulCount,
filesSuccessfulCount, failureCount, batchesCount, progressHandler, accessControlList, mode, batchSize,
continueOnFailure, continuationToken, null, timeout, contextFinal);
} catch (Exception e) {
if (e instanceof DataLakeStorageException) {
throw LOGGER.logExceptionAsError(ModelHelper.changeAclRequestFailed((DataLakeStorageException) e,
continuationToken));
} else {
throw LOGGER.logExceptionAsError(ModelHelper.changeAclFailed(e, continuationToken));
}
}
}
    /*
     * Processes one batch response of a recursive ACL change and, while the service returns a continuation
     * token and maxBatches has not been reached, issues the next batch and recurses. Aggregates counters
     * across batches via the shared AtomicIntegers and invokes the optional progress handler per batch.
     */
    Response<AccessControlChangeResult> setAccessControlRecursiveWithResponseHelper(
        ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response,
        Integer maxBatches, AtomicInteger directoriesSuccessfulCount, AtomicInteger filesSuccessfulCount,
        AtomicInteger failureCount, AtomicInteger batchesCount,
        Consumer<Response<AccessControlChanges>> progressHandler, String accessControlStr,
        PathSetAccessControlRecursiveMode mode, Integer batchSize, Boolean continueOnFailure, String lastToken,
        List<AccessControlChangeFailure> batchFailures, Duration timeout, Context context) {
        // Fold this batch's results into the running aggregate counters.
        batchesCount.incrementAndGet();
        directoriesSuccessfulCount.addAndGet(response.getValue().getDirectoriesSuccessful());
        filesSuccessfulCount.addAndGet(response.getValue().getFilesSuccessful());
        failureCount.addAndGet(response.getValue().getFailureCount());
        // Record the failed entries only the first time a failure is observed; later batches keep that list.
        if (failureCount.get() > 0 && batchFailures == null) {
            batchFailures = response.getValue().getFailedEntries()
                .stream()
                .map(aclFailedEntry -> new AccessControlChangeFailure()
                    .setDirectory(aclFailedEntry.getType().equals("DIRECTORY"))
                    .setName(aclFailedEntry.getName())
                    .setErrorMessage(aclFailedEntry.getErrorMessage())
                ).collect(Collectors.toList());
        }
        // Effectively-final copy for use inside the lambda below.
        List<AccessControlChangeFailure> finalBatchFailures = batchFailures;
        /*
        Determine which token we should report/return/use next.
        If there was a token present on the response (still processing and either no errors or forceFlag set),
        use that one.
        If there were no failures or force flag set and still nothing present, we are at the end, so use that.
        If there were failures and no force flag set, use the last token (no token is returned in this case).
        */
        String newToken = response.getDeserializedHeaders().getXMsContinuation();
        String effectiveNextToken;
        if (newToken != null && !newToken.isEmpty()) {
            effectiveNextToken = newToken;
        } else {
            if (failureCount.get() == 0 || (continueOnFailure == null || continueOnFailure)) {
                effectiveNextToken = newToken;
            } else {
                effectiveNextToken = lastToken;
            }
        }
        // Surface per-batch progress (this batch's failures/counters plus running aggregates) to the caller.
        if (progressHandler != null) {
            AccessControlChanges changes = new AccessControlChanges();
            changes.setContinuationToken(effectiveNextToken);
            changes.setBatchFailures(
                response.getValue().getFailedEntries()
                    .stream()
                    .map(aclFailedEntry -> new AccessControlChangeFailure()
                        .setDirectory(aclFailedEntry.getType().equals("DIRECTORY"))
                        .setName(aclFailedEntry.getName())
                        .setErrorMessage(aclFailedEntry.getErrorMessage())
                    ).collect(Collectors.toList())
            );
            changes.setBatchCounters(new AccessControlChangeCounters()
                .setChangedDirectoriesCount(response.getValue().getDirectoriesSuccessful())
                .setChangedFilesCount(response.getValue().getFilesSuccessful())
                .setFailedChangesCount(response.getValue().getFailureCount()));
            changes.setAggregateCounters(new AccessControlChangeCounters()
                .setChangedDirectoriesCount(directoriesSuccessfulCount.get())
                .setChangedFilesCount(filesSuccessfulCount.get())
                .setFailedChangesCount(failureCount.get()));
            progressHandler.accept(
                new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), changes,
                    response.getDeserializedHeaders()));
        }
        /*
        Determine if we are finished either because there is no new continuation (failure or finished) token or we have
        hit maxBatches.
        */
        if ((newToken == null || newToken.isEmpty()) || (maxBatches != null && batchesCount.get() >= maxBatches)) {
            AccessControlChangeResult result = new AccessControlChangeResult()
                .setBatchFailures(batchFailures)
                .setContinuationToken(effectiveNextToken)
                .setCounters(new AccessControlChangeCounters()
                    .setChangedDirectoriesCount(directoriesSuccessfulCount.get())
                    .setChangedFilesCount(filesSuccessfulCount.get())
                    .setFailedChangesCount(failureCount.get()));
            return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
                result, response.getDeserializedHeaders());
        }
        // More work remains: request the next batch with the effective token and recurse on its response.
        try {
            Callable<ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse>> operation =
                () -> this.dataLakeStorage.getPaths().setAccessControlRecursiveWithResponse(mode, null, effectiveNextToken,
                    continueOnFailure, batchSize, accessControlStr, null, context);
            ResponseBase<PathsSetAccessControlRecursiveHeaders, SetAccessControlRecursiveResponse> response2 =
                StorageImplUtils.sendRequest(operation, timeout, DataLakeStorageException.class);
            return setAccessControlRecursiveWithResponseHelper(response2, maxBatches,
                directoriesSuccessfulCount, filesSuccessfulCount, failureCount, batchesCount, progressHandler,
                accessControlStr, mode, batchSize, continueOnFailure, effectiveNextToken, finalBatchFailures, timeout, context);
        } catch (Exception e) {
            // Wrap with the token in effect so the caller can resume the operation.
            if (e instanceof DataLakeStorageException) {
                throw LOGGER.logExceptionAsError(ModelHelper.changeAclRequestFailed((DataLakeStorageException) e,
                    effectiveNextToken));
            } else {
                throw LOGGER.logExceptionAsError(ModelHelper.changeAclFailed(e, effectiveNextToken));
            }
        }
    }
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
* <pre>
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.updateAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult updateAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
return updateAccessControlRecursiveWithResponse(new PathUpdateAccessControlRecursiveOptions(accessControlList),
null, Context.NONE).getValue();
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathUpdateAccessControlRecursiveOptions options =
* new PathUpdateAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.updateAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathUpdateAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> updateAccessControlRecursiveWithResponse(
PathUpdateAccessControlRecursiveOptions options, Duration timeout, Context context) {
return setAccessControlRecursiveWithResponse(
PathAccessControlEntry.serializeList(options.getAccessControlList()), options.getProgressHandler(),
PathSetAccessControlRecursiveMode.MODIFY, options.getBatchSize(), options.getMaxBatches(),
options.isContinueOnFailure(), options.getContinuationToken(), timeout, context);
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
* <pre>
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.removeAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult removeAccessControlRecursive(
List<PathRemoveAccessControlEntry> accessControlList) {
return removeAccessControlRecursiveWithResponse(new PathRemoveAccessControlRecursiveOptions(accessControlList),
null, Context.NONE).getValue();
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathRemoveAccessControlRecursiveOptions options =
* new PathRemoveAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.removeAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathRemoveAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> removeAccessControlRecursiveWithResponse(
PathRemoveAccessControlRecursiveOptions options, Duration timeout, Context context) {
return setAccessControlRecursiveWithResponse(PathRemoveAccessControlEntry.serializeList(
options.getAccessControlList()), options.getProgressHandler(), PathSetAccessControlRecursiveMode.REMOVE,
options.getBatchSize(), options.getMaxBatches(), options.isContinueOnFailure(),
options.getContinuationToken(), timeout, context);
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
* <pre>
* PathAccessControl response = client.getAccessControl&
* System.out.printf&
* PathAccessControlEntry.serializeList&
* response.getOwner&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathAccessControl getAccessControl() {
return getAccessControlWithResponse(false, null, null, Context.NONE).getValue();
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* boolean userPrincipalNameReturned = false;
*
* Response<PathAccessControl> response = client.getAccessControlWithResponse&
* requestConditions, timeout, new Context&
*
* PathAccessControl pac = response.getValue&
*
* System.out.printf&
* PathAccessControlEntry.serializeList&
* pac.getPermissions&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param userPrincipalNameReturned When true, user identity values returned as User Principal Names. When false,
* user identity values returned as Azure Active Directory Object IDs. Default value is false.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathAccessControl> getAccessControlWithResponse(boolean userPrincipalNameReturned,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
ModifiedAccessConditions mac = new ModifiedAccessConditions()
.setIfMatch(requestConditions.getIfMatch())
.setIfNoneMatch(requestConditions.getIfNoneMatch())
.setIfModifiedSince(requestConditions.getIfModifiedSince())
.setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
Context finalContext = context == null ? Context.NONE : context;
Callable<ResponseBase<PathsGetPropertiesHeaders, Void>> operation =
() -> this.dataLakeStorage.getPaths().getPropertiesWithResponse(null, null,
PathGetPropertiesAction.GET_ACCESS_CONTROL, userPrincipalNameReturned, lac, mac, finalContext);
ResponseBase<PathsGetPropertiesHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout,
DataLakeStorageException.class);
return new SimpleResponse<>(response, new PathAccessControl(
PathAccessControlEntry.parseList(response.getDeserializedHeaders().getXMsAcl()),
PathPermissions.parseSymbolic(response.getDeserializedHeaders().getXMsPermissions()),
response.getDeserializedHeaders().getXMsGroup(), response.getDeserializedHeaders().getXMsOwner()));
}
    /*
     * Renames this path to the given destination (same account) by issuing a Create with
     * PathRenameMode.LEGACY and an x-ms-rename-source pointing at the current path. Returns a client
     * bound to the destination path.
     */
    Response<DataLakePathClient> renameWithResponseWithTimeout(String destinationFileSystem, String destinationPath,
        DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
        Duration timeout, Context context) {
        Context finalContext = context == null ? Context.NONE : context;
        // Null condition bags are normalized to empty conditions (no-op preconditions).
        destinationRequestConditions = destinationRequestConditions == null ? new DataLakeRequestConditions()
            : destinationRequestConditions;
        DataLakeRequestConditions finalSourceRequestConditions = sourceRequestConditions == null ? new DataLakeRequestConditions()
            : sourceRequestConditions;
        // Source preconditions are sent as "source if-*" headers on the destination create call.
        SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions()
            .setSourceIfModifiedSince(finalSourceRequestConditions.getIfModifiedSince())
            .setSourceIfUnmodifiedSince(finalSourceRequestConditions.getIfUnmodifiedSince())
            .setSourceIfMatch(finalSourceRequestConditions.getIfMatch())
            .setSourceIfNoneMatch(finalSourceRequestConditions.getIfNoneMatch())
        LeaseAccessConditions destLac = new LeaseAccessConditions()
            .setLeaseId(destinationRequestConditions.getLeaseId());
        ModifiedAccessConditions destMac = new ModifiedAccessConditions()
            .setIfMatch(destinationRequestConditions.getIfMatch())
            .setIfNoneMatch(destinationRequestConditions.getIfNoneMatch())
            .setIfModifiedSince(destinationRequestConditions.getIfModifiedSince())
            .setIfUnmodifiedSince(destinationRequestConditions.getIfUnmodifiedSince());
        DataLakePathClient dataLakePathClient = getPathClient(destinationFileSystem, destinationPath);
        // Rename source is "/<filesystem>/<url-encoded path>", as required by the service.
        String renameSource = "/" + this.getFileSystemName() + "/" + Utility.urlEncode(pathName);
        String signature = null;
        if (this.getSasToken() != null) {
            // Strip a leading '?' so the signature can be appended as a query string below.
            if (this.getSasToken().getSignature().startsWith("?")) {
                signature = this.getSasToken().getSignature().substring(1);
            } else {
                signature = this.getSasToken().getSignature();
            }
        }
        // When authenticated with SAS, the source path must carry the SAS query string.
        String finalRenameSource = signature != null ? renameSource + "?" + signature : renameSource;
        Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
            dataLakePathClient.dataLakeStorage.getPaths().createWithResponse(null /* request id */, null /* timeout */,
                null /* pathResourceType */, null /* continuation */, PathRenameMode.LEGACY, finalRenameSource,
                finalSourceRequestConditions.getLeaseId(), null /* properties */, null /* permissions */,
                null /* umask */, null /* owner */, null /* group */, null /* acl */, null /* proposedLeaseId */,
                null /* leaseDuration */, null /* expiryOptions */, null /* expiresOn */,
                null /* encryptionContext */, null /* pathHttpHeaders */, destLac, destMac, sourceConditions,
                null /* cpkInfo */, finalContext);
        ResponseBase<PathsCreateHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout,
            DataLakeStorageException.class);
        return new SimpleResponse<>(response, dataLakePathClient);
    }
/**
* Takes in a destination and creates a DataLakePathClient with a new path
* @param destinationFileSystem The destination file system
* @param destinationPath The destination path
* @return A DataLakePathClient
*/
DataLakePathClient getPathClient(String destinationFileSystem, String destinationPath) {
if (destinationFileSystem == null) {
destinationFileSystem = getFileSystemName();
}
if (CoreUtils.isNullOrEmpty(destinationPath)) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'destinationPath' can not be set to null"));
}
return new DataLakePathClient(dataLakePathAsyncClient,
ModelHelper.prepareBuilderReplacePath(destinationFileSystem, destinationPath, getFileSystemName(), getHttpPipeline(), getServiceVersion(), getPathUrl()).buildBlockBlobClient(), getHttpPipeline(),
getAccountUrl(), serviceVersion, accountName, destinationFileSystem, destinationPath, pathResourceType,
sasToken, customerProvidedKey, isTokenCredentialAuthenticated());
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
* <pre>
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties
* <pre>
* PathGetPropertiesOptions options = new PathGetPropertiesOptions&
*
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties(PathGetPropertiesOptions options) {
return getPropertiesUsingOptionsWithResponse(options, null, Context.NONE).getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<PathProperties> response = client.getPropertiesWithResponse&
* new Context&
*
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> getPropertiesWithResponse(DataLakeRequestConditions requestConditions,
Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() -> {
Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
Transforms.toBlobRequestConditions(requestConditions), timeout, context);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
}
/**
* Returns the resource's metadata and properties.
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Response<PathProperties> getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options, Duration timeout,
Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);
Context finalContext = context;
PathGetPropertiesOptions finalOptions = options;
return DataLakeImplUtils.returnOrConvertException(() -> {
Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
Transforms.toBlobRequestConditions(finalOptions.getRequestConditions()), timeout, finalContext);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.exists -->
*
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() ->
blockBlobClient.existsWithResponse(timeout, context), LOGGER);
}
BlockBlobClient getBlockBlobClient() {
return blockBlobClient;
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return generateUserDelegationSas(dataLakeServiceSasSignatureValues, userDelegationKey, getAccountName(),
Context.NONE);
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
return new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues, getFileSystemName(), getObjectPath(),
PathResourceType.DIRECTORY.equals(this.pathResourceType))
.generateUserDelegationSas(userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues) {
return generateSas(dataLakeServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* &
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues, Context context) {
return new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues, getFileSystemName(), getObjectPath(),
PathResourceType.DIRECTORY.equals(this.pathResourceType))
.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} | class DataLakePathClient {
    private static final ClientLogger LOGGER = new ClientLogger(DataLakePathClient.class);
    // Async counterpart backing this synchronous client.
    final DataLakePathAsyncClient dataLakePathAsyncClient;
    // Blob-layer client used for operations served via the blob endpoint (metadata, HTTP headers, ...).
    final BlockBlobClient blockBlobClient;
    // Generated REST client bound to the dfs endpoint for this path.
    final AzureDataLakeStorageRestAPIImpl dataLakeStorage;
    // Generated REST client bound to the file system only (no path component).
    final AzureDataLakeStorageRestAPIImpl fileSystemDataLakeStorage;
    // Generated REST client bound to the blob endpoint for this path.
    final AzureDataLakeStorageRestAPIImpl blobDataLakeStorage;
    private final String accountName;
    private final String fileSystemName;
    // Full path of the object within the file system (may contain '/' separators).
    final String pathName;
    // Last segment of pathName: the object's own name.
    private final String objectName;
    private final DataLakeServiceVersion serviceVersion;
    // Customer-provided encryption key info; null when no CPK is configured (see getCustomerProvidedKeyClient).
    private final CpkInfo customerProvidedKey;
    final PathResourceType pathResourceType;
    private final AzureSasCredential sasToken;
    private final boolean isTokenCredentialAuthenticated;
    /**
     * Package-private constructor wiring this sync client to its async counterpart, the blob-layer client, and
     * three generated REST clients (dfs path, blob path, and file-system scope).
     */
    DataLakePathClient(DataLakePathAsyncClient dataLakePathAsyncClient, BlockBlobClient blockBlobClient,
        HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName,
        String fileSystemName, String pathName, PathResourceType pathResourceType, AzureSasCredential sasToken,
        CpkInfo customerProvidedKey, boolean isTokenCredentialAuthenticated) {
        this.dataLakePathAsyncClient = dataLakePathAsyncClient;
        this.blockBlobClient = blockBlobClient;
        this.accountName = accountName;
        this.fileSystemName = fileSystemName;
        this.pathName = pathName;
        this.pathResourceType = pathResourceType;
        this.sasToken = sasToken;
        // dfs-endpoint client scoped to this specific path.
        this.dataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
            .pipeline(pipeline)
            .url(url)
            .fileSystem(fileSystemName)
            .path(this.pathName)
            .version(serviceVersion.getVersion())
            .buildClient();
        this.serviceVersion = serviceVersion;
        // NOTE(review): presumably rewrites the "dfs" host segment to "blob" — confirm against DataLakeImplUtils.
        String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(url, "blob", "dfs");
        // blob-endpoint client for the same path, used for operations only the blob service supports.
        this.blobDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
            .pipeline(pipeline)
            .url(blobUrl)
            .fileSystem(fileSystemName)
            .path(this.pathName)
            .version(serviceVersion.getVersion())
            .buildClient();
        // File-system-scoped client (no path), used for container-level calls.
        this.fileSystemDataLakeStorage = new AzureDataLakeStorageRestAPIImplBuilder()
            .pipeline(pipeline)
            .url(url)
            .fileSystem(fileSystemName)
            .version(serviceVersion.getVersion())
            .buildClient();
        this.customerProvidedKey = customerProvidedKey;
        this.isTokenCredentialAuthenticated = isTokenCredentialAuthenticated;
        // The object's own name is the last '/'-separated segment of the full path.
        String[] pathParts = pathName.split("/");
        this.objectName = pathParts[pathParts.length - 1];
    }
/**
* Gets the URL of the storage account.
*
* @return the URL.
*/
String getAccountUrl() {
return dataLakeStorage.getUrl();
}
/**
* Gets the URL of the object represented by this client on the Data Lake service.
*
* @return the URL.
*/
String getPathUrl() {
return dataLakeStorage.getUrl() + "/" + fileSystemName + "/" + Utility.urlEncode(pathName);
}
/**
* Gets the associated account name.
*
* @return Account name associated with this storage resource.
*/
    public String getAccountName() {
        // Immutable; assigned at construction.
        return accountName;
    }
/**
* Gets the name of the File System in which this object lives.
*
* @return The name of the File System.
*/
    public String getFileSystemName() {
        // Immutable; assigned at construction.
        return fileSystemName;
    }
/**
* Gets the path of this object, not including the name of the resource itself.
*
* @return The path of the object.
*/
    String getObjectPath() {
        // Full path within the file system, including parent directories.
        return pathName;
    }
/**
* Gets the name of this object, not including its full path.
*
* @return The name of the object.
*/
    String getObjectName() {
        // Last segment of the full path, computed once in the constructor.
        return this.objectName;
    }
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return dataLakeStorage.getHttpPipeline();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
    public DataLakeServiceVersion getServiceVersion() {
        // Immutable; assigned at construction.
        return serviceVersion;
    }
    /**
     * Gets the SAS credential this client was built with, if any.
     *
     * @return the {@link AzureSasCredential} used by this client; may be null if none was supplied.
     */
    AzureSasCredential getSasToken() {
        return this.sasToken;
    }
/**
* Gets the {@link CpkInfo} used to encrypt this path's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CustomerProvidedKey getCustomerProvidedKey() {
return new CustomerProvidedKey(customerProvidedKey.getEncryptionKey());
}
    /**
     * Gets the raw {@link CpkInfo} for internal use.
     *
     * @return the {@link CpkInfo}; may be null when no customer provided key is configured.
     */
    CpkInfo getCpkInfo() {
        return this.customerProvidedKey;
    }
    // Whether this client authenticates with a token credential; used to decide if paginated
    // recursive deletes are allowed (see deleteWithResponse).
    boolean isTokenCredentialAuthenticated() {
        return this.isTokenCredentialAuthenticated;
    }
/**
* Creates a new {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the path,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakePathClient} with the specified {@code customerProvidedKey}.
*/
public DataLakePathClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
CpkInfo finalCustomerProvidedKey = null;
if (customerProvidedKey != null) {
finalCustomerProvidedKey = new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
}
return new DataLakePathClient(dataLakePathAsyncClient,
blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)),
getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
getFileSystemName(), getObjectPath(), this.pathResourceType, getSasToken(),
finalCustomerProvidedKey, isTokenCredentialAuthenticated());
}
/**
* Creates a resource. By default, this method will not overwrite an existing path.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.create -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.create -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create() {
return create(false);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.create
* <pre>
* boolean overwrite = true;
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.create
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param overwrite Whether to overwrite, should data exist on the path.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo create(boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return createWithResponse(new DataLakePathCreateOptions().setRequestConditions(requestConditions), null, Context.NONE).getValue();
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* String permissions = "permissions";
* String umask = "umask";
*
* Response<PathInfo> response = client.createWithResponse&
* Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param permissions POSIX access permissions for the resource owner, the resource owning group, and others.
* @param umask Restricts permissions of the resource to be created.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> createWithResponse(String permissions, String umask, PathHttpHeaders headers,
Map<String, String> metadata, DataLakeRequestConditions requestConditions, Duration timeout,
Context context) {
DataLakePathCreateOptions options = new DataLakePathCreateOptions()
.setPermissions(permissions)
.setUmask(umask)
.setPathHttpHeaders(headers)
.setMetadata(metadata)
.setRequestConditions(requestConditions);
return createWithResponse(options, timeout, context);
}
/**
* Creates a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
* <pre>
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Map<String, String> metadata = Collections.singletonMap&
* String permissions = "permissions";
* String umask = "umask";
* String owner = "rwx";
* String group = "r--";
* String leaseId = CoreUtils.randomUuid&
* Integer duration = 15;
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setOwner&
* .setGroup&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setMetadata&
* .setProposedLeaseId&
* .setLeaseDuration&
*
* Response<PathInfo> response = client.createWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathCreateOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing information about the created resource
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a resource if a path does not exist.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createIfNotExists -->
* <pre>
* PathInfo pathInfo = client.createIfNotExists&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createIfNotExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @return {@link PathInfo} that contains information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo createIfNotExists() {
return createIfNotExistsWithResponse(new DataLakePathCreateOptions(), null, null).getValue();
}
/**
* Creates a resource if a path does not exist.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.createIfNotExistsWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* String permissions = "permissions";
* String umask = "umask";
* Map<String, String> metadata = Collections.singletonMap&
* DataLakePathCreateOptions options = new DataLakePathCreateOptions&
* .setPermissions&
* .setUmask&
* .setPathHttpHeaders&
* .setMetadata&
*
* Response<PathInfo> response = client.createIfNotExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.createIfNotExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathCreateOptions}
* metadata key or value, it must be removed or encoded.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} whose {@link Response#getValue() value} contains a
* {@link PathInfo} containing information about the resource. If {@link Response}'s status code is 201, a new
* resource was successfully created. If status code is 409, a resource already existed at this location.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<PathInfo> createIfNotExistsWithResponse(DataLakePathCreateOptions options, Duration timeout,
        Context context) {
        try {
            options = options == null ? new DataLakePathCreateOptions() : options;
            // NOTE(review): this mutates the caller's options object and unconditionally replaces any
            // request conditions the caller set with an If-None-Match: * condition — confirm this is intended.
            options.setRequestConditions(new DataLakeRequestConditions()
                .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD));
            return createWithResponse(options, timeout, context);
        } catch (DataLakeStorageException e) {
            // 409 Conflict means the resource already exists: report it as a null-valued response
            // rather than an error, per the "IfNotExists" contract.
            if (e.getStatusCode() == 409) {
                HttpResponse res = e.getResponse();
                return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), null);
            } else {
                throw LOGGER.logExceptionAsError(e);
            }
        } catch (RuntimeException e) {
            // Log-and-rethrow so unexpected failures are captured by the client logger.
            throw LOGGER.logExceptionAsError(e);
        }
    }
/**
* Deletes paths under the resource if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.deleteIfExists -->
* <pre>
* client.create&
* boolean result = client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if the resource is successfully deleted, {@code false} if resource does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, null).getValue();
}
/**
* Deletes all paths under the specified resource if exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
*
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the resource
* was successfully deleted. If status code is 404, the resource does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
Context context) {
options = options == null ? new DataLakePathDeleteOptions() : options;
try {
Response<Void> response = this.deleteWithResponse(options.getIsRecursive(), options.getRequestConditions(),
timeout, context);
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), true);
} catch (DataLakeStorageException e) {
if (e.getStatusCode() == 404) {
HttpResponse res = e.getResponse();
return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
}
    /**
     * Package-private delete method for use by {@link DataLakeFileClient} and {@link DataLakeDirectoryClient}
     *
     * @param recursive Whether to delete all paths beneath the directory.
     * @param requestConditions {@link DataLakeRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A {@link Response} containing status code and HTTP headers
     */
    Response<Void> deleteWithResponse(Boolean recursive, DataLakeRequestConditions requestConditions, Duration timeout,
        Context context) {
        requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
        // Split the combined request conditions into the lease and modified-access condition groups the
        // generated client expects.
        LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
        ModifiedAccessConditions mac = new ModifiedAccessConditions()
            .setIfMatch(requestConditions.getIfMatch())
            .setIfNoneMatch(requestConditions.getIfNoneMatch())
            .setIfModifiedSince(requestConditions.getIfModifiedSince())
            .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince())
        // Paginated deletes are only requested for recursive deletes on service version 2023-08-03 or later
        // when the client is token-credential authenticated; null keeps the flag off the wire entirely.
        Boolean paginated = (getServiceVersion().ordinal() >= DataLakeServiceVersion.V2023_08_03.ordinal()
            && Boolean.TRUE.equals(recursive)
            && isTokenCredentialAuthenticated()) ? true : null;
        Context finalContext = context == null ? Context.NONE : context;
        Callable<ResponseBase<PathsDeleteHeaders, Void>> operation = () -> {
            String continuation = null;
            ResponseBase<PathsDeleteHeaders, Void> lastResponse;
            // Follow x-ms-continuation headers until the service reports the delete is complete.
            do {
                lastResponse = this.dataLakeStorage.getPaths()
                    .deleteWithResponse(null, null, recursive, continuation, paginated, lac, mac, finalContext);
                continuation = lastResponse.getHeaders().getValue(Transforms.X_MS_CONTINUATION);
            } while (continuation != null && !continuation.isEmpty());
            return lastResponse;
        };
        ResponseBase<PathsDeleteHeaders, Void> response = sendRequest(operation, timeout, DataLakeStorageException.class);
        return new SimpleResponse<>(response, null);
    }
/**
* Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
* values must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setMetadata
* <pre>
* client.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a resource's metadata. The specified metadata in this method will replace existing metadata. If old
* values must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setMetadata
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* client.setMetadataWithResponse&
* new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() ->
blockBlobClient.setMetadataWithResponse(metadata, Transforms.toBlobRequestConditions(requestConditions),
timeout, context), LOGGER);
}
/**
* Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
* In order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setHttpHeaders
* <pre>
* client.setHttpHeaders&
* .setContentLanguage&
* .setContentType&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link PathHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(PathHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
* In order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setHttpHeadersWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<Void> response = client.setHttpHeadersWithResponse&
* .setContentLanguage&
* .setContentType&
* System.out.printf&
* response.getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link PathHttpHeaders}
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(PathHttpHeaders headers,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() ->
blockBlobClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
Transforms.toBlobRequestConditions(requestConditions), timeout, context), LOGGER);
}
/**
* Changes the access control list, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlList
* <pre>
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* String group = "group";
* String owner = "owner";
*
* System.out.printf&
* .getLastModified&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlList
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList A list of {@link PathAccessControlEntry} objects.
* @param group The group of the resource.
* @param owner The owner of the resource.
* @return The resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setAccessControlList(List<PathAccessControlEntry> accessControlList, String group, String owner) {
return setAccessControlListWithResponse(accessControlList, group, owner, null, null, Context.NONE).getValue();
}
/**
* Changes the access control list, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlListWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* String group = "group";
* String owner = "owner";
*
* Response<PathInfo> response = client.setAccessControlListWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlListWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList A list of {@link PathAccessControlEntry} objects.
* @param group The group of the resource.
* @param owner The owner of the resource.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setAccessControlListWithResponse(List<PathAccessControlEntry> accessControlList,
String group, String owner, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
return setAccessControlWithResponse(accessControlList, null, group, owner,
requestConditions, timeout, context);
}
    /**
     * Shared implementation behind {@code setAccessControlListWithResponse} and
     * {@code setPermissionsWithResponse}: exactly one of {@code accessControlList} or {@code permissions}
     * is expected to be non-null.
     *
     * @param accessControlList POSIX access control entries to set, or null when setting permissions instead.
     * @param permissions POSIX permissions to set, or null when setting an ACL instead.
     * @param group The owning group to set, or null to leave unchanged.
     * @param owner The owner to set, or null to leave unchanged.
     * @param requestConditions {@link DataLakeRequestConditions}; null means no conditions.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the resource info (ETag and last-modified time).
     */
    Response<PathInfo> setAccessControlWithResponse(List<PathAccessControlEntry> accessControlList,
        PathPermissions permissions, String group, String owner, DataLakeRequestConditions requestConditions,
        Duration timeout, Context context) {
        requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
        // Split the combined request conditions into lease and modified-access condition groups.
        LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId());
        ModifiedAccessConditions mac = new ModifiedAccessConditions()
            .setIfMatch(requestConditions.getIfMatch())
            .setIfNoneMatch(requestConditions.getIfNoneMatch())
            .setIfModifiedSince(requestConditions.getIfModifiedSince())
            .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
        // Serialize whichever of the two inputs was provided; the other stays null and off the wire.
        String permissionsString = permissions == null ? null : permissions.toString();
        String accessControlListString =
            accessControlList == null
                ? null
                : PathAccessControlEntry.serializeList(accessControlList);
        Context finalContext = context == null ? Context.NONE : context;
        Callable<ResponseBase<PathsSetAccessControlHeaders, Void>> operation = () -> this.dataLakeStorage.getPaths()
            .setAccessControlWithResponse(null, owner, group, permissionsString, accessControlListString, null, lac,
                mac, finalContext);
        ResponseBase<PathsSetAccessControlHeaders, Void> response = sendRequest(operation, timeout,
            DataLakeStorageException.class);
        return new SimpleResponse<>(response, new PathInfo(response.getDeserializedHeaders().getETag(),
            response.getDeserializedHeaders().getLastModified()));
    }
/**
* Changes the permissions, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setPermissions
* <pre>
* PathPermissions permissions = new PathPermissions&
* .setGroup&
* .setOwner&
* .setOther&
* String group = "group";
* String owner = "owner";
*
* System.out.printf&
* .getLastModified&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setPermissions
*
* <p>For more information, see the
* <a href="https:
*
* @param permissions {@link PathPermissions}
* @param group The group of the resource.
* @param owner The owner of the resource.
* @return The resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo setPermissions(PathPermissions permissions, String group, String owner) {
return setPermissionsWithResponse(permissions, group, owner, null, null, Context.NONE).getValue();
}
/**
* Changes the permissions, group and/or owner for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setPermissionsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathPermissions permissions = new PathPermissions&
* .setGroup&
* .setOwner&
* .setOther&
* String group = "group";
* String owner = "owner";
*
* Response<PathInfo> response = client.setPermissionsWithResponse&
* timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setPermissionsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param permissions {@link PathPermissions}
* @param group The group of the resource.
* @param owner The owner of the resource.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> setPermissionsWithResponse(PathPermissions permissions, String group, String owner,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // A permissions change is an access-control change with a null ACL, so reuse that path.
    Response<PathInfo> response =
        setAccessControlWithResponse(null, permissions, group, owner, requestConditions, timeout, context);
    return response;
}
/**
* Recursively sets the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursive
* <pre>
* PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.setAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult setAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    // Wrap the raw entry list in the options type and run with its default batching settings.
    PathSetAccessControlRecursiveOptions options = new PathSetAccessControlRecursiveOptions(accessControlList);
    return setAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively sets the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathSetAccessControlRecursiveOptions options =
* new PathSetAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.setAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathSetAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> setAccessControlRecursiveWithResponse(
    PathSetAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the ACL entries up front, then hand off to the shared async implementation in SET mode.
    String serializedAcl = PathAccessControlEntry.serializeList(options.getAccessControlList());
    Mono<Response<AccessControlChangeResult>> operation =
        dataLakePathAsyncClient.setAccessControlRecursiveWithResponse(serializedAcl,
            options.getProgressHandler(), PathSetAccessControlRecursiveMode.SET, options.getBatchSize(),
            options.getMaxBatches(), options.isContinueOnFailure(), options.getContinuationToken(), context);
    // Block on the reactive call, honoring the caller-supplied timeout when present.
    return StorageImplUtils.blockWithOptionalTimeout(operation, timeout);
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
* <pre>
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.updateAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult updateAccessControlRecursive(List<PathAccessControlEntry> accessControlList) {
    // Wrap the raw entry list in the options type and run with its default batching settings.
    PathUpdateAccessControlRecursiveOptions options =
        new PathUpdateAccessControlRecursiveOptions(accessControlList);
    return updateAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively updates the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathAccessControlEntry ownerEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
* .setExecutePermission&
*
* PathAccessControlEntry groupEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* PathAccessControlEntry otherEntry = new PathAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setPermissions&
*
* List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathUpdateAccessControlRecursiveOptions options =
* new PathUpdateAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.updateAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathUpdateAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> updateAccessControlRecursiveWithResponse(
    PathUpdateAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Serialize the ACL entries up front, then hand off to the shared async implementation in MODIFY mode.
    String serializedAcl = PathAccessControlEntry.serializeList(options.getAccessControlList());
    Mono<Response<AccessControlChangeResult>> operation =
        dataLakePathAsyncClient.setAccessControlRecursiveWithResponse(serializedAcl,
            options.getProgressHandler(), PathSetAccessControlRecursiveMode.MODIFY, options.getBatchSize(),
            options.getMaxBatches(), options.isContinueOnFailure(), options.getContinuationToken(), context);
    // Block on the reactive call, honoring the caller-supplied timeout when present.
    return StorageImplUtils.blockWithOptionalTimeout(operation, timeout);
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
* <pre>
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* AccessControlChangeResult response = client.removeAccessControlRecursive&
*
* System.out.printf&
* response.getCounters&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive
*
* <p>For more information, see the
* <a href="https:
*
* @param accessControlList The POSIX access control list for the file or directory.
* @return The result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AccessControlChangeResult removeAccessControlRecursive(
    List<PathRemoveAccessControlEntry> accessControlList) {
    // Wrap the raw entry list in the options type and run with its default batching settings.
    PathRemoveAccessControlRecursiveOptions options =
        new PathRemoveAccessControlRecursiveOptions(accessControlList);
    return removeAccessControlRecursiveWithResponse(options, null, Context.NONE).getValue();
}
/**
* Recursively removes the access control on a path and all subpaths.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* PathRemoveAccessControlEntry ownerEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry groupEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
*
* PathRemoveAccessControlEntry otherEntry = new PathRemoveAccessControlEntry&
* .setEntityId&
* .setAccessControlType&
* .setDefaultScope&
* List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
* pathAccessControlEntries.add&
*
* Integer batchSize = 2;
* Integer maxBatches = 10;
* boolean continueOnFailure = false;
* String continuationToken = null;
* Consumer<Response<AccessControlChanges>> progressHandler =
* response -> System.out.println&
*
* PathRemoveAccessControlRecursiveOptions options =
* new PathRemoveAccessControlRecursiveOptions&
* .setBatchSize&
* .setMaxBatches&
* .setContinueOnFailure&
* .setContinuationToken&
* .setProgressHandler&
*
* Response<AccessControlChangeResult> response = client.removeAccessControlRecursiveWithResponse&
* new Context&
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathRemoveAccessControlRecursiveOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the result of the operation.
*
* @throws DataLakeAclChangeFailedException if a request to storage throws a
* {@link DataLakeStorageException} or a {@link Exception} to wrap the exception with the continuation token.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<AccessControlChangeResult> removeAccessControlRecursiveWithResponse(
    PathRemoveAccessControlRecursiveOptions options, Duration timeout, Context context) {
    // Remove entries have their own serializer; everything else follows the SET/MODIFY pattern.
    String serializedAcl = PathRemoveAccessControlEntry.serializeList(options.getAccessControlList());
    Mono<Response<AccessControlChangeResult>> operation =
        dataLakePathAsyncClient.setAccessControlRecursiveWithResponse(serializedAcl,
            options.getProgressHandler(), PathSetAccessControlRecursiveMode.REMOVE, options.getBatchSize(),
            options.getMaxBatches(), options.isContinueOnFailure(), options.getContinuationToken(), context);
    // Block on the reactive call, honoring the caller-supplied timeout when present.
    return StorageImplUtils.blockWithOptionalTimeout(operation, timeout);
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
* <pre>
* PathAccessControl response = client.getAccessControl&
* System.out.printf&
* PathAccessControlEntry.serializeList&
* response.getOwner&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControl -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathAccessControl getAccessControl() {
    // userPrincipalNameReturned defaults to false; no conditions or timeout are applied.
    Response<PathAccessControl> response = getAccessControlWithResponse(false, null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the access control for a resource.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* boolean userPrincipalNameReturned = false;
*
* Response<PathAccessControl> response = client.getAccessControlWithResponse&
* requestConditions, timeout, new Context&
*
* PathAccessControl pac = response.getValue&
*
* System.out.printf&
* PathAccessControlEntry.serializeList&
* pac.getPermissions&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param userPrincipalNameReturned When true, user identity values returned as User Principal Names. When false,
* user identity values returned as Azure Active Directory Object IDs. Default value is false.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource access control.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathAccessControl> getAccessControlWithResponse(boolean userPrincipalNameReturned,
    DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Fall back to empty conditions / Context.NONE so the implementation never sees null.
    DataLakeRequestConditions conditions =
        requestConditions == null ? new DataLakeRequestConditions() : requestConditions;
    LeaseAccessConditions leaseConditions = new LeaseAccessConditions().setLeaseId(conditions.getLeaseId());
    ModifiedAccessConditions modifiedConditions = new ModifiedAccessConditions()
        .setIfMatch(conditions.getIfMatch())
        .setIfNoneMatch(conditions.getIfNoneMatch())
        .setIfModifiedSince(conditions.getIfModifiedSince())
        .setIfUnmodifiedSince(conditions.getIfUnmodifiedSince());
    Context finalContext = context == null ? Context.NONE : context;
    // GET_ACCESS_CONTROL carries its result purely in response headers; the body is Void.
    Callable<ResponseBase<PathsGetPropertiesHeaders, Void>> operation =
        () -> this.dataLakeStorage.getPaths().getPropertiesWithResponse(null, null,
            PathGetPropertiesAction.GET_ACCESS_CONTROL, userPrincipalNameReturned, leaseConditions,
            modifiedConditions, finalContext);
    ResponseBase<PathsGetPropertiesHeaders, Void> response =
        sendRequest(operation, timeout, DataLakeStorageException.class);
    // Assemble the access control view (ACL, permissions, group, owner) from the headers.
    PathsGetPropertiesHeaders headers = response.getDeserializedHeaders();
    PathAccessControl accessControl = new PathAccessControl(
        PathAccessControlEntry.parseList(headers.getXMsAcl()),
        PathPermissions.parseSymbolic(headers.getXMsPermissions()),
        headers.getXMsGroup(), headers.getXMsOwner());
    return new SimpleResponse<>(response, accessControl);
}
// Performs the service-side rename by issuing a "create" request against the destination path in
// LEGACY rename mode, pointing x-ms-rename-source at this client's current path. Package-private:
// the public rename overloads funnel through here.
Response<DataLakePathClient> renameWithResponseWithTimeout(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    // Default both condition sets so the accessor chains below never dereference null.
    destinationRequestConditions = destinationRequestConditions == null ? new DataLakeRequestConditions()
        : destinationRequestConditions;
    DataLakeRequestConditions finalSourceRequestConditions = sourceRequestConditions == null ? new DataLakeRequestConditions()
        : sourceRequestConditions;
    // Source-side match/modified conditions travel in a separate header group from the destination's.
    SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions()
        .setSourceIfModifiedSince(finalSourceRequestConditions.getIfModifiedSince())
        .setSourceIfUnmodifiedSince(finalSourceRequestConditions.getIfUnmodifiedSince())
        .setSourceIfMatch(finalSourceRequestConditions.getIfMatch())
        .setSourceIfNoneMatch(finalSourceRequestConditions.getIfNoneMatch())
    LeaseAccessConditions destLac = new LeaseAccessConditions()
        .setLeaseId(destinationRequestConditions.getLeaseId());
    ModifiedAccessConditions destMac = new ModifiedAccessConditions()
        .setIfMatch(destinationRequestConditions.getIfMatch())
        .setIfNoneMatch(destinationRequestConditions.getIfNoneMatch())
        .setIfModifiedSince(destinationRequestConditions.getIfModifiedSince())
        .setIfUnmodifiedSince(destinationRequestConditions.getIfUnmodifiedSince());
    // Client bound to the destination path; the create call below is issued through it.
    DataLakePathClient dataLakePathClient = getPathClient(destinationFileSystem, destinationPath);
    // Rename source is "/<filesystem>/<url-encoded current path>".
    String renameSource = "/" + this.getFileSystemName() + "/" + Utility.urlEncode(pathName);
    String signature = null;
    if (this.getSasToken() != null) {
        // Normalize the SAS signature: strip a leading '?' if present so exactly one is appended below.
        if (this.getSasToken().getSignature().startsWith("?")) {
            signature = this.getSasToken().getSignature().substring(1);
        } else {
            signature = this.getSasToken().getSignature();
        }
    }
    // Re-attach the SAS so the service can authorize reading the source during the rename.
    String finalRenameSource = signature != null ? renameSource + "?" + signature : renameSource;
    // NOTE: the generated createWithResponse takes a long positional parameter list; the inline
    // /* name */ comments mark which slot each null corresponds to. Do not reorder.
    Callable<ResponseBase<PathsCreateHeaders, Void>> operation = () ->
        dataLakePathClient.dataLakeStorage.getPaths().createWithResponse(null /* request id */, null /* timeout */,
            null /* pathResourceType */, null /* continuation */, PathRenameMode.LEGACY, finalRenameSource,
            finalSourceRequestConditions.getLeaseId(), null /* properties */, null /* permissions */,
            null /* umask */, null /* owner */, null /* group */, null /* acl */, null /* proposedLeaseId */,
            null /* leaseDuration */, null /* expiryOptions */, null /* expiresOn */,
            null /* encryptionContext */, null /* pathHttpHeaders */, destLac, destMac, sourceConditions,
            null /* cpkInfo */, finalContext);
    ResponseBase<PathsCreateHeaders, Void> response = sendRequest(operation, timeout,
        DataLakeStorageException.class);
    return new SimpleResponse<>(response, dataLakePathClient);
}
/**
 * Takes in a destination and creates a {@code DataLakePathClient} bound to the new path.
 * @param destinationFileSystem The destination file system; if {@code null}, this client's file system is used.
 * @param destinationPath The destination path; must not be {@code null} or empty.
 * @return A {@code DataLakePathClient} for the destination path.
*/
DataLakePathClient getPathClient(String destinationFileSystem, String destinationPath) {
    // A null destination file system means "same file system as this client".
    if (destinationFileSystem == null) {
        destinationFileSystem = getFileSystemName();
    }
    if (CoreUtils.isNullOrEmpty(destinationPath)) {
        // The guard rejects both null and empty strings, so the message must say so.
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'destinationPath' can not be null or empty"));
    }
    // Rebuild the underlying blob client against the destination path while carrying over
    // this client's pipeline, credentials, service version, and customer-provided key.
    return new DataLakePathClient(dataLakePathAsyncClient,
        dataLakePathAsyncClient.prepareBuilderReplacePath(destinationFileSystem, destinationPath).buildBlockBlobClient(),
        getHttpPipeline(), getAccountUrl(), serviceVersion, accountName, destinationFileSystem, destinationPath,
        pathResourceType, sasToken, customerProvidedKey, isTokenCredentialAuthenticated());
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
* <pre>
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
*
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties() {
    // No request conditions and no timeout: fetch with defaults and unwrap the payload.
    Response<PathProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties
* <pre>
* PathGetPropertiesOptions options = new PathGetPropertiesOptions&
*
* System.out.printf&
* client.getProperties&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @return The resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties(PathGetPropertiesOptions options) {
    // Delegate to the options-aware WithResponse implementation and unwrap the payload.
    Response<PathProperties> response = getPropertiesUsingOptionsWithResponse(options, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the resource's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
*
* Response<PathProperties> response = client.getPropertiesWithResponse&
* new Context&
*
* System.out.printf&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> getPropertiesWithResponse(DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the blob client, translating both the response payload and any
    // blob-layer exception into their Data Lake equivalents.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> blobResponse = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(requestConditions), timeout, context);
        PathProperties pathProperties = Transforms.toPathProperties(blobResponse.getValue(), blobResponse);
        return new SimpleResponse<>(blobResponse, pathProperties);
    }, LOGGER);
}
/**
* Returns the resource's metadata and properties.
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link PathGetPropertiesOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the resource properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
private Response<PathProperties> getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options, Duration timeout,
    Context context) {
    // The UPN-header supplier already tolerates a null options object; extend the same
    // null-safety to the request conditions so a null options no longer throws NPE.
    context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);
    Context finalContext = context;
    DataLakeRequestConditions requestConditions = (options == null) ? null : options.getRequestConditions();
    // Delegate to the blob client, converting the payload and any blob-layer exception
    // into their Data Lake equivalents.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(requestConditions), timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.exists -->
*
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegate to the WithResponse variant with no timeout and unwrap the boolean payload.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
* <p>For example, a DataLakeFileClient representing a path to a datalake directory will return true, and vice
* versa.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the path exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // The blob client performs the actual check; any blob-layer exception is converted
    // to its Data Lake equivalent on the way out.
    return DataLakeImplUtils.returnOrConvertException(
        () -> blockBlobClient.existsWithResponse(timeout, context), LOGGER);
}
// Package-private accessor for the underlying blob client used to service the
// blob-endpoint operations in this class (properties, existence checks, etc.).
BlockBlobClient getBlockBlobClient() {
    return blockBlobClient;
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Delegate to the full overload, defaulting to this client's account name and an empty context.
    String accountName = getAccountName();
    return generateUserDelegationSas(dataLakeServiceSasSignatureValues, userDelegationKey, accountName,
        Context.NONE);
}
/**
* Generates a user delegation SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}.
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a user delegation SAS.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* PathSasPermission myPermission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link DataLakeServiceClient
* on how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Directories and files are flagged differently when the SAS string is built.
    boolean isDirectory = PathResourceType.DIRECTORY.equals(this.pathResourceType);
    DataLakeSasImplUtil sasImplUtil = new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues,
        getFileSystemName(), getObjectPath(), isDirectory);
    return sasImplUtil.generateUserDelegationSas(userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues) {
    // Delegate to the context-aware overload with an empty context.
    return generateSas(dataLakeServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the path using the specified {@link DataLakeServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link DataLakeServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* PathSasPermission permission = new PathSasPermission&
*
* DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues&
* .setStartTime&
*
* &
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakePathClient.generateSas
*
* @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSignatureValues, Context context) {
return new DataLakeSasImplUtil(dataLakeServiceSasSignatureValues, getFileSystemName(), getObjectPath(),
PathResourceType.DIRECTORY.equals(this.pathResourceType))
.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} |
We don't throw Cred Unavailable exception when constructing the credential. Validating and throwing on user provided input params is fine in the build method. What logic are other languages following here ? | public AzurePipelinesCredential build() {
Configuration configuration = identityClientOptions.getConfiguration();
if (configuration == null) {
configuration = Configuration.getGlobalConfiguration();
}
String oidcEndpoint = configuration.get("SYSTEM_OIDCREQUESTURI");
try {
ValidationUtil.validate(getClass().getSimpleName(),
LOGGER,
Arrays.asList("clientId", "tenantId", "serviceConnectionId", "systemAccessToken", "oidcEndpoint"),
Arrays.asList(clientId, tenantId, serviceConnectionId, this.systemAccessToken, oidcEndpoint));
} catch (IllegalArgumentException e) {
throw LOGGER.logExceptionAsError(new CredentialUnavailableException(
"One or more required properties are null or empty. Set the appropriate value on the builder."
+ " See https:
e));
}
String requestUrl = String.format("%s?api-version=%s&serviceConnectionId=%s",
oidcEndpoint, OIDC_API_VERSION, serviceConnectionId);
return new AzurePipelinesCredential(clientId, tenantId, requestUrl, systemAccessToken, identityClientOptions.clone());
} | throw LOGGER.logExceptionAsError(new CredentialUnavailableException( | public AzurePipelinesCredential build() {
Configuration configuration = identityClientOptions.getConfiguration();
if (configuration == null) {
configuration = Configuration.getGlobalConfiguration();
}
String oidcEndpoint = configuration.get("SYSTEM_OIDCREQUESTURI");
ValidationUtil.validate(getClass().getSimpleName(),
LOGGER,
Arrays.asList("clientId", "tenantId", "serviceConnectionId", "systemAccessToken", "oidcEndpoint"),
Arrays.asList(clientId, tenantId, serviceConnectionId, this.systemAccessToken, oidcEndpoint));
String requestUrl = String.format("%s?api-version=%s&serviceConnectionId=%s",
oidcEndpoint, OIDC_API_VERSION, serviceConnectionId);
return new AzurePipelinesCredential(clientId, tenantId, requestUrl, systemAccessToken, identityClientOptions.clone());
} | class AzurePipelinesCredentialBuilder extends AadCredentialBuilderBase<AzurePipelinesCredentialBuilder> {
private static final ClientLogger LOGGER = new ClientLogger(AzurePipelinesCredentialBuilder.class);
private static final String OIDC_API_VERSION = "7.1";
private String serviceConnectionId;
private String systemAccessToken;
/**
* Sets the service connection id for the Azure Pipelines service connection. The service connection ID is
* retrieved from the Service Connection in the portal.
*
* @param serviceConnectionId The service connection ID, as found in the query string's resourceId key.
* @return the updated instance of the builder.
*/
public AzurePipelinesCredentialBuilder serviceConnectionId(String serviceConnectionId) {
this.serviceConnectionId = serviceConnectionId;
return this;
}
/**
* Sets the System Access Token for the Azure Pipelines service connection. The system access token is
* retrieved from the pipeline variables by assigning it to an environment variable and reading it.
* See {@link AzurePipelinesCredential} for more information.
*
* @param systemAccessToken the system access token for the Azure Pipelines service connection.
* @return The updated instance of the builder.
*/
public AzurePipelinesCredentialBuilder systemAccessToken(String systemAccessToken) {
this.systemAccessToken = systemAccessToken;
return this;
}
/**
* Builds an instance of the {@link AzurePipelinesCredential} with the current configurations.
* @return an instance of the {@link AzurePipelinesCredential}.
*/
} | class AzurePipelinesCredentialBuilder extends AadCredentialBuilderBase<AzurePipelinesCredentialBuilder> {
private static final ClientLogger LOGGER = new ClientLogger(AzurePipelinesCredentialBuilder.class);
private static final String OIDC_API_VERSION = "7.1";
private String serviceConnectionId;
private String systemAccessToken;
/**
* Sets the service connection id for the Azure Pipelines service connection. The service connection ID is
* retrieved from the Service Connection in the portal.
*
* @param serviceConnectionId The service connection ID, as found in the query string's resourceId key.
* @return the updated instance of the builder.
*/
public AzurePipelinesCredentialBuilder serviceConnectionId(String serviceConnectionId) {
this.serviceConnectionId = serviceConnectionId;
return this;
}
/**
* Sets the System Access Token for the Azure Pipelines service connection. The system access token is
* retrieved from the pipeline variables by assigning it to an environment variable and reading it.
* See {@link AzurePipelinesCredential} for more information.
*
* @param systemAccessToken the system access token for the Azure Pipelines service connection.
* @return The updated instance of the builder.
*/
public AzurePipelinesCredentialBuilder systemAccessToken(String systemAccessToken) {
this.systemAccessToken = systemAccessToken;
return this;
}
/**
* Builds an instance of the {@link AzurePipelinesCredential} with the current configurations. Requires setting
* the following parameters:
* <ul>
* <li>Client ID via {@link
* <li>Tenant ID via {@link
* <li>Service Connection ID via {@link
* <li>System Access Token via {@link
* </ul>
* Requires the {@code SYSTEM_OIDCREQUESTURI} environment variable to be set.
*
* @throws IllegalArgumentException Thrown when required parameters are set or the environment is not correctly
* configured.
* @return an instance of the {@link AzurePipelinesCredential}.
*/
} |
Redact the subscriptionId from resource "id" property. | private void addSanitizers() {
List<TestProxySanitizer> sanitizers = Arrays.asList(
new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
);
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
} | new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), | private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
} | class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
} | class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
} |
Since now we have sanitizer on "id" property, use this to compare resource ID. | protected void assertResourceIdEquals(String expected, String actual) {
String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID);
String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID);
Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual));
} | } | protected void assertResourceIdEquals(String expected, String actual) {
String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID);
String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID);
Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual));
} | class ResourceManagerTestProxyTestBase extends TestProxyTestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "https:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
@Override
public void write(byte[] b) {
}
@Override
public void write(byte[] b, int off, int len) {
}
};
/**
* Redacted value.
*/
protected static final String REDACTED_VALUE = "REDACTED";
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);
private AzureProfile testProfile;
private boolean isSkipInPlayback;
private final List<TestProxySanitizer> sanitizers = new ArrayList<>();
/**
* Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
* bound.
*/
@RegisterExtension
final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60));
/**
* Initializes ResourceManagerTestProxyTestBase class.
*/
protected ResourceManagerTestProxyTestBase() {
}
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* Generates a random UUID.
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* Generates a random password.
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* Generates an SSH public key.
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX);
/**
* Asserts that the resource ID is same.
*
* @param expected the expected resource ID.
* @param actual the actual resource ID.
*/
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
return testResourceNamer.recordValueFromConfig(clientId);
}
/**
* Gets the test profile.
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* Checks whether test mode is {@link TestMode
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* Checks whether test should be skipped in playback.
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
httpPipeline = buildHttpPipeline(
request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)),
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
if (!testContextManager.doNotRecordTest()) {
interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match"))));
addSanitizers();
removeSanitizers();
}
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String tenantId = Objects.requireNonNull(
configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID),
"'AZURE_TENANT_ID' environment variable cannot be null.");
String subscriptionId = Objects.requireNonNull(
configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID),
"'AZURE_SUBSCRIPTION_ID' environment variable cannot be null.");
credential = new DefaultAzureCredentialBuilder()
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
policies.add(this.interceptorManager.getRecordPolicy());
addSanitizers();
removeSanitizers();
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
}
initializeClients(httpPipeline, testProfile);
}
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException ignored) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
}
private void removeSanitizers() {
interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493");
}
/**
* Adds test proxy sanitizers.
* <p>
* Recommend to call this API in subclass constructor.
*
* @param sanitizers the test proxy sanitizers.
*/
protected void addSanitizers(TestProxySanitizer... sanitizers) {
this.sanitizers.addAll(Arrays.asList(sanitizers));
}
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {
private final Duration duration;
private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
Objects.requireNonNull(timeoutSupplier);
this.duration = timeoutSupplier.get();
}
@Override
public void interceptTestMethod(Invocation<Void> invocation,
ReflectiveInvocationContext<Method> invocationContext,
ExtensionContext extensionContext) throws Throwable {
if (isPlaybackMode()) {
Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
} else {
invocation.proceed();
}
}
}
} | class ResourceManagerTestProxyTestBase extends TestProxyTestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "https:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
@Override
public void write(byte[] b) {
}
@Override
public void write(byte[] b, int off, int len) {
}
};
/**
* Redacted value.
*/
protected static final String REDACTED_VALUE = "REDACTED";
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);
private AzureProfile testProfile;
private boolean isSkipInPlayback;
private final List<TestProxySanitizer> sanitizers = new ArrayList<>();
/**
* Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
* bound.
*/
@RegisterExtension
final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60));
/**
* Initializes ResourceManagerTestProxyTestBase class.
*/
protected ResourceManagerTestProxyTestBase() {
}
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* Generates a random UUID.
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* Generates a random password.
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* Generates an SSH public key.
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX);
/**
* Asserts that the resource ID is same.
*
* @param expected the expected resource ID.
* @param actual the actual resource ID.
*/
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
return testResourceNamer.recordValueFromConfig(clientId);
}
/**
* Gets the test profile.
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* Checks whether test mode is {@link TestMode
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* Checks whether test should be skipped in playback.
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
httpPipeline = buildHttpPipeline(
request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)),
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
if (!testContextManager.doNotRecordTest()) {
interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match"))));
addSanitizers();
removeSanitizers();
}
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String tenantId = Objects.requireNonNull(
configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID),
"'AZURE_TENANT_ID' environment variable cannot be null.");
String subscriptionId = Objects.requireNonNull(
configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID),
"'AZURE_SUBSCRIPTION_ID' environment variable cannot be null.");
credential = new DefaultAzureCredentialBuilder()
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
policies.add(this.interceptorManager.getRecordPolicy());
addSanitizers();
removeSanitizers();
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
}
initializeClients(httpPipeline, testProfile);
}
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException ignored) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
}
private void removeSanitizers() {
interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493");
}
/**
* Adds test proxy sanitizers.
* <p>
* Recommend to call this API in subclass constructor.
*
* @param sanitizers the test proxy sanitizers.
*/
protected void addSanitizers(TestProxySanitizer... sanitizers) {
this.sanitizers.addAll(Arrays.asList(sanitizers));
}
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {
private final Duration duration;
private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
Objects.requireNonNull(timeoutSupplier);
this.duration = timeoutSupplier.get();
}
@Override
public void interceptTestMethod(Invocation<Void> invocation,
ReflectiveInvocationContext<Method> invocationContext,
ExtensionContext extensionContext) throws Throwable {
if (isPlaybackMode()) {
Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
} else {
invocation.proceed();
}
}
}
} |
Define it at `private static final Region REGION` | public void testCreateCatalog() {
Catalog catalog = null;
try {
String catalogName = "catalog" + randomPadding();
catalog = azureSphereManager.catalogs()
.define(catalogName)
.withRegion(Region.create("global", "Global"))
.withExistingResourceGroup(resourceGroupName)
.create();
catalog.refresh();
Assertions.assertEquals(catalogName, catalog.name());
Assertions.assertEquals(catalogName, azureSphereManager.catalogs().getById(catalog.id()).name());
Assertions.assertTrue(azureSphereManager.catalogs().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (catalog != null) {
azureSphereManager.catalogs().deleteById(catalog.id());
}
}
} | .withRegion(Region.create("global", "Global")) | public void testCreateCatalog() {
Catalog catalog = null;
try {
String catalogName = "catalog" + randomPadding();
catalog = azureSphereManager.catalogs()
.define(catalogName)
.withRegion(REGION_GLOBAL)
.withExistingResourceGroup(resourceGroupName)
.create();
catalog.refresh();
Assertions.assertEquals(catalogName, catalog.name());
Assertions.assertEquals(catalogName, azureSphereManager.catalogs().getById(catalog.id()).name());
Assertions.assertTrue(azureSphereManager.catalogs().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (catalog != null) {
azureSphereManager.catalogs().deleteById(catalog.id());
}
}
} | class AzureSphereManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private AzureSphereManager azureSphereManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
azureSphereManager = AzureSphereManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@LiveOnly
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class AzureSphereManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION_USEAST = Region.US_EAST;
private static final Region REGION_GLOBAL = Region.create("global", "Global");
private String resourceGroupName = "rg" + randomPadding();
private AzureSphereManager azureSphereManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
azureSphereManager = AzureSphereManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION_USEAST)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@LiveOnly
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Fixed in the new version. | public void testCreateCatalog() {
Catalog catalog = null;
try {
String catalogName = "catalog" + randomPadding();
catalog = azureSphereManager.catalogs()
.define(catalogName)
.withRegion(Region.create("global", "Global"))
.withExistingResourceGroup(resourceGroupName)
.create();
catalog.refresh();
Assertions.assertEquals(catalogName, catalog.name());
Assertions.assertEquals(catalogName, azureSphereManager.catalogs().getById(catalog.id()).name());
Assertions.assertTrue(azureSphereManager.catalogs().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (catalog != null) {
azureSphereManager.catalogs().deleteById(catalog.id());
}
}
} | .withRegion(Region.create("global", "Global")) | public void testCreateCatalog() {
Catalog catalog = null;
try {
String catalogName = "catalog" + randomPadding();
catalog = azureSphereManager.catalogs()
.define(catalogName)
.withRegion(REGION_GLOBAL)
.withExistingResourceGroup(resourceGroupName)
.create();
catalog.refresh();
Assertions.assertEquals(catalogName, catalog.name());
Assertions.assertEquals(catalogName, azureSphereManager.catalogs().getById(catalog.id()).name());
Assertions.assertTrue(azureSphereManager.catalogs().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (catalog != null) {
azureSphereManager.catalogs().deleteById(catalog.id());
}
}
} | class AzureSphereManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private AzureSphereManager azureSphereManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
azureSphereManager = AzureSphereManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@LiveOnly
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class AzureSphereManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION_USEAST = Region.US_EAST;
private static final Region REGION_GLOBAL = Region.create("global", "Global");
private String resourceGroupName = "rg" + randomPadding();
private AzureSphereManager azureSphereManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
azureSphereManager = AzureSphereManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION_USEAST)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@LiveOnly
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
So, here we used to create one batch and add Iterable messages, but the return value of `tryAddMessage` is ignored resulting those messages to be dropped. | private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) {
if (Objects.isNull(messages)) {
return monoError(LOGGER, new NullPointerException("'messages' cannot be null."));
}
final Iterator<ServiceBusMessage> messagesItr = messages.iterator();
if (messagesItr.hasNext()) {
return sendNextIterableBatch(messagesItr.next(), messagesItr, transaction);
} else {
return Mono.empty();
}
} | return sendNextIterableBatch(messagesItr.next(), messagesItr, transaction); | private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) {
if (Objects.isNull(messages)) {
return monoError(LOGGER, new NullPointerException("'messages' cannot be null."));
}
final Iterator<ServiceBusMessage> messagesItr = messages.iterator();
if (messagesItr.hasNext()) {
return sendNextIterableBatch(messagesItr.next(), messagesItr, transaction);
} else {
return Mono.empty();
}
} | class ServiceBusSenderAsyncClient implements AutoCloseable {
/**
* The default maximum allowable size, in bytes, for a batch to be sent.
*/
static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024;
private static final String TRANSACTION_LINK_NAME = "coordinator";
private static final ServiceBusMessage END = new ServiceBusMessage(new byte[0]);
private static final CreateMessageBatchOptions DEFAULT_BATCH_OPTIONS = new CreateMessageBatchOptions();
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSenderAsyncClient.class);
private final AtomicReference<String> linkName = new AtomicReference<>();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final MessageSerializer messageSerializer;
private final AmqpRetryOptions retryOptions;
private final AmqpRetryPolicy retryPolicy;
private final MessagingEntityType entityType;
private final Runnable onClientClose;
private final String entityName;
private final Mono<ServiceBusAmqpConnection> connectionProcessor;
private final String fullyQualifiedNamespace;
private final String viaEntityName;
private final String identifier;
private final ServiceBusSenderInstrumentation instrumentation;
private final ServiceBusTracer tracer;
private final boolean isV2;
/**
* Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity.
*/
ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType,
ConnectionCacheWrapper connectionCacheWrapper, AmqpRetryOptions retryOptions, ServiceBusSenderInstrumentation instrumentation,
MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName, String identifier) {
this.messageSerializer = Objects.requireNonNull(messageSerializer,
"'messageSerializer' cannot be null.");
this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null.");
Objects.requireNonNull(connectionCacheWrapper, "'connectionCacheWrapper' cannot be null.");
this.connectionProcessor = connectionCacheWrapper.getConnection();
this.fullyQualifiedNamespace = connectionCacheWrapper.getFullyQualifiedNamespace();
this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null.");
this.tracer = instrumentation.getTracer();
this.retryPolicy = getRetryPolicy(retryOptions);
this.entityType = entityType;
this.viaEntityName = viaEntityName;
this.onClientClose = onClientClose;
this.identifier = identifier;
this.isV2 = connectionCacheWrapper.isV2();
}
/**
* Gets the fully qualified namespace.
*
* @return The fully qualified namespace.
*/
public String getFullyQualifiedNamespace() {
return fullyQualifiedNamespace;
}
/**
* Gets the name of the Service Bus resource.
*
* @return The name of the Service Bus resource.
*/
public String getEntityPath() {
return entityName;
}
/**
* Gets the identifier of the instance of {@link ServiceBusSenderAsyncClient}.
*
* @return The identifier that can identify the instance of {@link ServiceBusSenderAsyncClient}.
*/
public String getIdentifier() {
return identifier;
}
/**
* Sends a message to a Service Bus queue or topic.
*
* @param message Message to be sent to Service Bus queue or topic.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code message} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or
* the message could not be sent.
*/
public Mono<Void> sendMessage(ServiceBusMessage message) {
if (Objects.isNull(message)) {
return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
}
return sendInternal(Flux.just(message), null);
}
/**
* Sends a message to a Service Bus queue or topic.
*
* @param message Message to be sent to Service Bus queue or topic.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code message}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or
* the message could not be sent.
*/
public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return sendInternal(Flux.just(message), transactionContext);
}
/**
* Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages
* exceed the maximum size of a single batch, an exception will be triggered and the send will fail.
* By default, the message size is the max amount allowed on the link.
*
* @param messages Messages to be sent to Service Bus queue or topic.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
*
* @throws NullPointerException if {@code batch}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if the message could not be sent or {@code message} is larger than the maximum size of the {@link
* ServiceBusMessageBatch}.
*/
public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages,
ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return sendIterable(messages, transactionContext);
}
/**
* Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages exceed
* the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
* message size is the max amount allowed on the link.
*
* @param messages Messages to be sent to Service Bus queue or topic.
*
* @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
*
* @throws NullPointerException if {@code messages} is {@code null}.
* @throws ServiceBusException if the message could not be sent or {@code message} is larger than the maximum size of the {@link
* ServiceBusMessageBatch}.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) {
return sendIterable(messages, null);
}
/**
* Sends a message batch to the Azure Service Bus entity this sender is connected to.
*
* @param batch of messages which allows client to send maximum allowed size for a batch of messages.
*
* @return A {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code batch} is {@code null}.
* @throws ServiceBusException if the message batch could not be sent.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> sendMessages(ServiceBusMessageBatch batch) {
return sendInternal(batch, null);
}
/**
* Sends a message batch to the Azure Service Bus entity this sender is connected to.
*
* @param batch of messages which allows client to send maximum allowed size for a batch of messages.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return A {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code batch}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws ServiceBusException if the message batch could not be sent.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return sendInternal(batch, transactionContext);
}
/**
* Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
*
* @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
* @throws ServiceBusException if the message batch could not be created.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<ServiceBusMessageBatch> createMessageBatch() {
return createMessageBatch(DEFAULT_BATCH_OPTIONS);
}
    /**
     * Creates an {@link ServiceBusMessageBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link ServiceBusMessageBatch}.
     *
     * @return A new {@link ServiceBusMessageBatch} configured with the given options.
     * @throws NullPointerException if {@code options} is null.
     * @throws ServiceBusException if the message batch could not be created.
     * @throws IllegalStateException if sender is already disposed.
     * @throws IllegalArgumentException if {@link CreateMessageBatchOptions#getMaximumSizeInBytes()} is larger than
     * the maximum allowed size.
     */
    public Mono<ServiceBusMessageBatch> createMessageBatch(CreateMessageBatchOptions options) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "createMessageBatch")));
        }
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        }
        final int maxSize = options.getMaximumSizeInBytes();
        // Resolve the effective capacity against the negotiated link size; a non-positive reported size means
        // the link did not report one, so fall back to the default maximum message length.
        final Mono<ServiceBusMessageBatch> createBatch = getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
            final int maximumLinkSize = size > 0
                ? size
                : MAX_MESSAGE_LENGTH_BYTES;
            // A requested size larger than what the link supports can never succeed; fail fast.
            if (maxSize > maximumLinkSize) {
                return monoError(LOGGER, new IllegalArgumentException(String.format(Locale.US,
                    "CreateMessageBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size"
                        + " (%s bytes).", maxSize, maximumLinkSize)));
            }
            // An unset (non-positive) option defaults to the link's maximum.
            final int batchSize = maxSize > 0
                ? maxSize
                : maximumLinkSize;
            return Mono.just(new ServiceBusMessageBatch(isV2, batchSize, link::getErrorContext, tracer, messageSerializer));
        })).onErrorMap(RequestResponseChannelClosedException.class,
            e -> {
                // The management channel closed mid-operation; surface it as a retriable (transient) AMQP error.
                return new AmqpException(true, e.getMessage(), e, null);
            });
        return withRetry(createBatch, retryOptions,
            String.format("entityPath[%s]: Creating batch timed out.", entityName))
            .onErrorMap(this::mapError);
    }
/**
* Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
* enqueued and made available to receivers only at the scheduled enqueue time.
*
* @param message Message to be sent to the Service Bus Queue.
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
* @param transactionContext to be set on message before sending to Service Bus.
*
* @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
*
* @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or
* {@code transactionContext.transactionID} is {@code null}.
* @throws ServiceBusException If the message could not be scheduled.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext);
}
/**
* Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
* enqueued and made available to receivers only at the scheduled enqueue time.
*
* @param message Message to be sent to the Service Bus Queue.
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
*
* @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
*
* @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}.
* @throws ServiceBusException If the message could not be scheduled.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime) {
return scheduleMessageInternal(message, scheduledEnqueueTime, null);
}
/**
* Sends a batch of scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled
* message is enqueued and made available to receivers only at the scheduled enqueue time.
*
* @param messages Messages to be sent to the Service Bus queue or topic.
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
*
* @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
*
* @throws NullPointerException If {@code messages} or {@code scheduledEnqueueTime} is {@code null}.
* @throws ServiceBusException If the messages could not be scheduled.
* @throws IllegalStateException if sender is already disposed.
*/
public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime) {
return scheduleMessages(messages, scheduledEnqueueTime, null);
}
    /**
     * Sends scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled message is
     * enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param messages Messages to be sent to the Service Bus Queue.
     * @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus queue or topic.
     * @param transactionContext Transaction to associate with the operation; may be {@code null} for a
     * non-transactional schedule.
     *
     * @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
     *
     * @throws NullPointerException If {@code messages}, {@code scheduledEnqueueTime}, {@code transactionContext} or
     * {@code transactionContext.transactionId} is {@code null}.
     * @throws ServiceBusException If the messages could not be scheduled or the {@code message} is larger than
     * the maximum size of the {@link ServiceBusMessageBatch}.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessages")));
        }
        if (Objects.isNull(messages)) {
            return fluxError(LOGGER, new NullPointerException("'messages' cannot be null."));
        }
        if (Objects.isNull(scheduledEnqueueTime)) {
            return fluxError(LOGGER, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
        }
        // All messages must fit into a single link-sized batch; otherwise scheduling fails with the failing index.
        return createMessageBatch()
            .map(messageBatch -> {
                int index = 0;
                for (ServiceBusMessage message : messages) {
                    if (!messageBatch.tryAddMessage(message)) {
                        final String error = String.format(Locale.US,
                            "Messages exceed max allowed size for all the messages together. "
                                + "Failed to add message at index '%s'.", index);
                        throw LOGGER.logExceptionAsError(new IllegalArgumentException(error));
                    }
                    ++index;
                }
                return messageBatch;
            })
            // Scheduling is a management-node operation; the trace wrapper instruments each scheduled message.
            .flatMapMany(messageBatch ->
                tracer.traceScheduleFlux("ServiceBus.scheduleMessages",
                    connectionProcessor
                        .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                        .flatMapMany(managementNode -> managementNode.schedule(messageBatch.getMessages(), scheduledEnqueueTime,
                            messageBatch.getMaxSizeInBytes(), linkName.get(), transactionContext)),
                    messageBatch.getMessages())
            ).onErrorMap(this::mapError);
    }
    /**
     * Cancels the enqueuing of a scheduled message, if it was not already enqueued.
     *
     * @param sequenceNumber of the scheduled message to cancel.
     *
     * @return The {@link Mono} that finishes this operation on service bus resource.
     *
     * @throws IllegalArgumentException if {@code sequenceNumber} is negative.
     * @throws ServiceBusException If the messages could not be cancelled.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<Void> cancelScheduledMessage(long sequenceNumber) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessage")));
        }
        // Sequence numbers are non-negative; a negative value can never identify a scheduled message.
        if (sequenceNumber < 0) {
            return monoError(LOGGER, new IllegalArgumentException("'sequenceNumber' cannot be negative."));
        }
        // Cancellation is a management-node operation; failures are normalized via mapError.
        return tracer.traceMono("ServiceBus.cancelScheduledMessage",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                .flatMap(managementNode -> managementNode.cancelScheduledMessages(
                    Collections.singletonList(sequenceNumber), linkName.get())))
            .onErrorMap(this::mapError);
    }
/**
* Cancels the enqueuing of an already scheduled message, if it was not already enqueued.
*
* @param sequenceNumbers of the scheduled messages to cancel.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code sequenceNumbers} is null.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if the scheduled messages cannot cancelled.
*/
public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers) {
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessages")));
}
if (Objects.isNull(sequenceNumbers)) {
return monoError(LOGGER, new NullPointerException("'messages' cannot be null."));
}
return tracer.traceMono("ServiceBus.cancelScheduledMessages",
connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityName, entityType))
.flatMap(managementNode -> managementNode.cancelScheduledMessages(sequenceNumbers, linkName.get())))
.onErrorMap(this::mapError);
}
    /**
     * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed to all
     * operations that need to take part in this transaction.
     *
     * @return A new {@link ServiceBusTransactionContext}.
     *
     * @throws IllegalStateException if sender is already disposed.
     * @throws ServiceBusException if a transaction cannot be created.
     *
     * @see ServiceBusReceiverAsyncClient
     */
    public Mono<ServiceBusTransactionContext> createTransaction() {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "createTransaction")));
        }
        // Transactions are coordinated over a dedicated session named "coordinator"; the AMQP transaction id is
        // wrapped in the public ServiceBusTransactionContext type for callers.
        return tracer.traceMono("ServiceBus.createTransaction",
            connectionProcessor
                .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
                .flatMap(transactionSession -> transactionSession.createTransaction())
                .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())))
            .onErrorMap(this::mapError);
    }
    /**
     * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
     *
     * @param transactionContext to be committed.
     *
     * @return The {@link Mono} that finishes this operation on Service Bus resource.
     *
     * @throws IllegalStateException if sender is already disposed.
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
     * @throws ServiceBusException if the transaction could not be committed.
     *
     * @see ServiceBusReceiverAsyncClient
     */
    public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "commitTransaction")));
        }
        if (Objects.isNull(transactionContext)) {
            return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        // Commit is performed over the same "coordinator" session that created the transaction.
        return
            tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor
                .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
                .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
                    transactionContext.getTransactionId()))))
            .onErrorMap(this::mapError);
    }
    /**
     * Rolls back the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
     *
     * @param transactionContext Transaction to rollback.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws IllegalStateException if sender is already disposed.
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
     * @throws ServiceBusException if the transaction could not be rolled back.
     *
     * @see ServiceBusReceiverAsyncClient
     */
    public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "rollbackTransaction")));
        }
        if (Objects.isNull(transactionContext)) {
            return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        // Rollback is performed over the same "coordinator" session that created the transaction.
        return tracer.traceMono("ServiceBus.rollbackTransaction",
            connectionProcessor
                .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
                .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
                    transactionContext.getTransactionId()))))
            .onErrorMap(this::mapError);
    }
/**
* Disposes of the {@link ServiceBusSenderAsyncClient}. If the client has a dedicated connection, the underlying
* connection is also closed.
*/
@Override
public void close() {
if (isDisposed.getAndSet(true)) {
return;
}
onClientClose.run();
}
    // Recursively drains 'messagesItr' into link-sized batches. 'first' is the message that did not fit into the
    // previous batch (or the very first message overall); the END sentinel signals that everything was sent.
    private Mono<Void> sendNextIterableBatch(ServiceBusMessage first, Iterator<ServiceBusMessage> messagesItr,
        ServiceBusTransactionContext transaction) {
        return this.createMessageBatch().flatMap(batch -> {
            ServiceBusMessage next = first;
            do {
                if (!batch.tryAddMessage(next)) {
                    // If the message does not fit into a freshly created batch, it can never be sent.
                    if (next == first) {
                        return monoError(LOGGER,
                            new IllegalArgumentException("The message " + first + " is too big to send even in a batch."));
                    }
                    // Batch is full: send it and hand the overflow message to the next recursion step.
                    if (transaction != null) {
                        return this.sendMessages(batch, transaction).then(Mono.just(next));
                    } else {
                        return this.sendMessages(batch).then(Mono.just(next));
                    }
                }
                if (messagesItr.hasNext()) {
                    next = messagesItr.next();
                } else {
                    // Iterator exhausted: send the final batch and complete via the END sentinel.
                    if (transaction != null) {
                        return this.sendMessages(batch, transaction).then(Mono.just(END));
                    } else {
                        return this.sendMessages(batch).then(Mono.just(END));
                    }
                }
            } while (true);
        }).flatMap(missed -> {
            if (missed == END) {
                return Mono.empty();
            } else {
                // Continue with the message that overflowed the previous batch.
                return sendNextIterableBatch(missed, messagesItr, transaction);
            }
        });
    }
    // Shared implementation for the public scheduleMessage overloads. Validates arguments, resolves the effective
    // maximum message size from the send link, then schedules via the management node.
    private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessage")));
        }
        if (Objects.isNull(message)) {
            return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
        }
        if (Objects.isNull(scheduledEnqueueTime)) {
            return monoError(LOGGER, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
        }
        return tracer.traceScheduleMono("ServiceBus.scheduleMessage",
            getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
                // A non-positive link size means none was reported; fall back to the default maximum.
                int maxSize = size > 0
                    ? size
                    : MAX_MESSAGE_LENGTH_BYTES;
                // Scheduling goes through the management node; .next() takes the single sequence number.
                return connectionProcessor
                    .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                    .flatMap(managementNode -> managementNode.schedule(Arrays.asList(message), scheduledEnqueueTime,
                        maxSize, link.getLinkName(), transactionContext)
                        .next());
            })),
            message, message.getContext())
            .onErrorMap(this::mapError);
    }
/**
* Sends a message batch to the Azure Service Bus entity this sender is connected to.
* @param batch of messages which allows client to send maximum allowed size for a batch of messages.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return A {@link Mono} the finishes this operation on service bus resource.
*/
private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessages")));
}
if (Objects.isNull(batch)) {
return monoError(LOGGER, new NullPointerException("'batch' cannot be null."));
}
if (batch.getMessages().isEmpty()) {
LOGGER.info("Cannot send an EventBatch that is empty.");
return Mono.empty();
}
LOGGER.atInfo()
.addKeyValue("batchSize", batch.getCount())
.log("Sending batch.");
final List<org.apache.qpid.proton.message.Message> messages = Collections.synchronizedList(new ArrayList<>());
batch.getMessages().forEach(serviceBusMessage -> {
final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(serviceBusMessage);
final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
? new MessageAnnotations(new HashMap<>())
: message.getMessageAnnotations();
message.setMessageAnnotations(messageAnnotations);
messages.add(message);
});
final Mono<Void> sendMessage = getSendLink().flatMap(link -> {
if (transactionContext != null && transactionContext.getTransactionId() != null) {
final TransactionalState deliveryState = new TransactionalState();
deliveryState.setTxnId(Binary.create(transactionContext.getTransactionId()));
return messages.size() == 1
? link.send(messages.get(0), deliveryState)
: link.send(messages, deliveryState);
} else {
return messages.size() == 1
? link.send(messages.get(0))
: link.send(messages);
}
}).onErrorMap(RequestResponseChannelClosedException.class,
e -> {
return new AmqpException(true, e.getMessage(), e, null);
});
final Mono<Void> sendWithRetry = withRetry(sendMessage, retryOptions,
String.format("entityPath[%s], messages-count[%s]: Sending messages timed out.", entityName, batch.getCount()))
.onErrorMap(this::mapError);
return instrumentation.instrumentSendBatch("ServiceBus.send", sendWithRetry, batch.getMessages());
}
    // Collects the message flux into link-sized batches (capped at one batch by the collector's maxNumberOfBatches)
    // and sends them through the batch path.
    private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage")));
        }
        return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // A non-positive link size means none was reported; fall back to the default maximum.
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateMessageBatchOptions batchOptions = new CreateMessageBatchOptions()
                        .setMaximumSizeInBytes(batchSize);
                    return messages.collect(new AmqpMessageCollector(isV2, batchOptions, 1,
                        link::getErrorContext, tracer, messageSerializer));
                })
                .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext)))
            .onErrorMap(this::mapError);
    }
    // Sends each batch in the flux, completing when all have been sent; failures are logged and then propagated.
    private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches,
        ServiceBusTransactionContext transactionContext) {
        return eventBatches
            .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext))
            .then()
            .doOnError(error -> LOGGER.error("Error sending batch.", error));
    }
    // Obtains (or creates) the AMQP send link for this entity. When a via-entity is configured, the link is created
    // against the via entity for transit sends. The first successfully created link name is recorded once so that
    // management-node operations (schedule/cancel) can reference it.
    private Mono<AmqpSendLink> getSendLink() {
        return connectionProcessor
            .flatMap(connection -> {
                if (!CoreUtils.isNullOrEmpty(viaEntityName)) {
                    return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions,
                        entityName, identifier);
                } else {
                    return connection.createSendLink(entityName, entityName, retryOptions, null, identifier);
                }
            })
            .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName()));
    }
private Throwable mapError(Throwable throwable) {
if (!(throwable instanceof ServiceBusException)) {
return new ServiceBusException(throwable, ServiceBusErrorSource.SEND);
}
return throwable;
}
    /**
     * A {@link Collector} that folds a stream of {@link ServiceBusMessage} into a list of size-bounded
     * {@link ServiceBusMessageBatch} instances, starting a new batch whenever the current one is full and failing
     * when more than {@code maxNumberOfBatches} would be produced.
     */
    private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>,
        List<ServiceBusMessageBatch>> {
        private final int maxMessageSize;
        // Null means "unbounded number of batches".
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final ServiceBusTracer tracer;
        private final MessageSerializer serializer;
        private final boolean isV2;
        // The batch currently being filled; set to null by the finisher.
        private volatile ServiceBusMessageBatch currentBatch;
        AmqpMessageCollector(boolean isV2, CreateMessageBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, ServiceBusTracer tracer, MessageSerializer serializer) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            // A non-positive configured size falls back to the default maximum message length.
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.contextProvider = contextProvider;
            this.tracer = tracer;
            this.serializer = serializer;
            this.isV2 = isV2;
            currentBatch = new ServiceBusMessageBatch(isV2, maxMessageSize, contextProvider, tracer, serializer);
        }
        @Override
        public Supplier<List<ServiceBusMessageBatch>> supplier() {
            return ArrayList::new;
        }
        @Override
        public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() {
            return (list, event) -> {
                ServiceBusMessageBatch batch = currentBatch;
                if (batch.tryAddMessage(event)) {
                    return;
                }
                // Current batch is full; refuse to open another batch past the configured limit.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }
                currentBatch = new ServiceBusMessageBatch(isV2, maxMessageSize, contextProvider, tracer, serializer);
                // NOTE(review): this tryAddMessage result is ignored; a message too large for an empty batch would
                // be dropped silently here — confirm upstream size validation covers that case.
                currentBatch.tryAddMessage(event);
                list.add(batch);
            };
        }
        @Override
        public BinaryOperator<List<ServiceBusMessageBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }
        @Override
        public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() {
            return list -> {
                // Flush the in-progress batch (if any) and mark the collector as finished.
                ServiceBusMessageBatch batch = currentBatch;
                currentBatch = null;
                if (batch != null) {
                    list.add(batch);
                }
                return list;
            };
        }
        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
} | class ServiceBusSenderAsyncClient implements AutoCloseable {
    /**
     * The default maximum allowable size, in bytes, for a batch to be sent.
     */
    static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024;
    // Name of the dedicated session/link used for transaction coordination.
    private static final String TRANSACTION_LINK_NAME = "coordinator";
    // Sentinel message used by sendNextIterableBatch to signal that the iterator was fully drained.
    private static final ServiceBusMessage END = new ServiceBusMessage(new byte[0]);
    // Shared default options: no explicit maximum size, so the link's limit applies.
    private static final CreateMessageBatchOptions DEFAULT_BATCH_OPTIONS = new CreateMessageBatchOptions();
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSenderAsyncClient.class);
    // First successfully created send-link name; recorded once and reused for management operations.
    private final AtomicReference<String> linkName = new AtomicReference<>();
    // Set to true exactly once by close(); guards every public operation.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final MessageSerializer messageSerializer;
    private final AmqpRetryOptions retryOptions;
    private final AmqpRetryPolicy retryPolicy;
    private final MessagingEntityType entityType;
    // Callback invoked on the first close() to release shared resources.
    private final Runnable onClientClose;
    private final String entityName;
    private final Mono<ServiceBusAmqpConnection> connectionProcessor;
    private final String fullyQualifiedNamespace;
    // When non-empty, sends transit through this via-entity instead of going directly to entityName.
    private final String viaEntityName;
    private final String identifier;
    private final ServiceBusSenderInstrumentation instrumentation;
    private final ServiceBusTracer tracer;
    private final boolean isV2;
    /**
     * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity.
     */
    ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType,
        ConnectionCacheWrapper connectionCacheWrapper, AmqpRetryOptions retryOptions, ServiceBusSenderInstrumentation instrumentation,
        MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName, String identifier) {
        // Fail fast on required collaborators; the builder is expected to supply all of them.
        this.messageSerializer = Objects.requireNonNull(messageSerializer,
            "'messageSerializer' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null.");
        Objects.requireNonNull(connectionCacheWrapper, "'connectionCacheWrapper' cannot be null.");
        this.connectionProcessor = connectionCacheWrapper.getConnection();
        this.fullyQualifiedNamespace = connectionCacheWrapper.getFullyQualifiedNamespace();
        this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null.");
        this.tracer = instrumentation.getTracer();
        this.retryPolicy = getRetryPolicy(retryOptions);
        this.entityType = entityType;
        this.viaEntityName = viaEntityName;
        this.onClientClose = onClientClose;
        this.identifier = identifier;
        this.isV2 = connectionCacheWrapper.isV2();
    }
/**
* Gets the fully qualified namespace.
*
* @return The fully qualified namespace.
*/
public String getFullyQualifiedNamespace() {
return fullyQualifiedNamespace;
}
/**
* Gets the name of the Service Bus resource.
*
* @return The name of the Service Bus resource.
*/
public String getEntityPath() {
return entityName;
}
/**
* Gets the identifier of the instance of {@link ServiceBusSenderAsyncClient}.
*
* @return The identifier that can identify the instance of {@link ServiceBusSenderAsyncClient}.
*/
public String getIdentifier() {
return identifier;
}
/**
* Sends a message to a Service Bus queue or topic.
*
* @param message Message to be sent to Service Bus queue or topic.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code message} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or
* the message could not be sent.
*/
public Mono<Void> sendMessage(ServiceBusMessage message) {
if (Objects.isNull(message)) {
return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
}
return sendInternal(Flux.just(message), null);
}
/**
* Sends a message to a Service Bus queue or topic.
*
* @param message Message to be sent to Service Bus queue or topic.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code message}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or
* the message could not be sent.
*/
public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return sendInternal(Flux.just(message), transactionContext);
}
/**
* Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages
* exceed the maximum size of a single batch, an exception will be triggered and the send will fail.
* By default, the message size is the max amount allowed on the link.
*
* @param messages Messages to be sent to Service Bus queue or topic.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
*
* @throws NullPointerException if {@code batch}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if the message could not be sent or {@code message} is larger than the maximum size of the {@link
* ServiceBusMessageBatch}.
*/
public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages,
ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return sendIterable(messages, transactionContext);
}
/**
* Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages exceed
* the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
* message size is the max amount allowed on the link.
*
* @param messages Messages to be sent to Service Bus queue or topic.
*
* @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
*
* @throws NullPointerException if {@code messages} is {@code null}.
* @throws ServiceBusException if the message could not be sent or {@code message} is larger than the maximum size of the {@link
* ServiceBusMessageBatch}.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) {
return sendIterable(messages, null);
}
/**
 * Sends a message batch to the Azure Service Bus entity this sender is connected to.
 *
 * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
 *
 * @return A {@link Mono} the finishes this operation on service bus resource.
 *
 * @throws NullPointerException if {@code batch} is {@code null}.
 * @throws ServiceBusException if the message batch could not be sent.
 * @throws IllegalStateException if sender is already disposed.
 */
public Mono<Void> sendMessages(ServiceBusMessageBatch batch) {
    // Delegate to the shared batch-send path without a transaction; null checks happen in sendInternal.
    return sendInternal(batch, null);
}
/**
 * Sends a message batch to the Azure Service Bus entity this sender is connected to, associating the send with
 * the supplied transaction.
 *
 * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
 * @param transactionContext to be set on batch message before sending to Service Bus.
 *
 * @return A {@link Mono} the finishes this operation on service bus resource.
 *
 * @throws NullPointerException if {@code batch}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is {@code null}.
 * @throws ServiceBusException if the message batch could not be sent.
 * @throws IllegalStateException if sender is already disposed.
 */
public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
    // Guard clauses for the transaction context; the batch itself is validated inside sendInternal.
    if (transactionContext == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return sendInternal(batch, transactionContext);
}
/**
 * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
 *
 * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
 * @throws ServiceBusException if the message batch could not be created.
 * @throws IllegalStateException if sender is already disposed.
 */
public Mono<ServiceBusMessageBatch> createMessageBatch() {
    // Uses default (unset) options; the batch size then falls back to the link maximum
    // in createMessageBatch(CreateMessageBatchOptions).
    return createMessageBatch(DEFAULT_BATCH_OPTIONS);
}
/**
 * Creates an {@link ServiceBusMessageBatch} configured with the options specified.
 *
 * @param options A set of options used to configure the {@link ServiceBusMessageBatch}.
 *
 * @return A new {@link ServiceBusMessageBatch} configured with the given options.
 * @throws NullPointerException if {@code options} is null.
 * @throws ServiceBusException if the message batch could not be created.
 * @throws IllegalStateException if sender is already disposed.
 * @throws IllegalArgumentException if {@link CreateMessageBatchOptions#getMaximumSizeInBytes()} is larger than the
 *     maximum allowed size.
 */
public Mono<ServiceBusMessageBatch> createMessageBatch(CreateMessageBatchOptions options) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "createMessageBatch")));
    }
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    }
    final int maxSize = options.getMaximumSizeInBytes();
    // Resolve the requested batch size against the link's reported size before building the batch.
    final Mono<ServiceBusMessageBatch> createBatch = getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
        // A non-positive link size means the link did not report one; fall back to the client default.
        final int maximumLinkSize = size > 0
            ? size
            : MAX_MESSAGE_LENGTH_BYTES;
        if (maxSize > maximumLinkSize) {
            return monoError(LOGGER, new IllegalArgumentException(String.format(Locale.US,
                "CreateMessageBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size"
                    + " (%s bytes).", maxSize, maximumLinkSize)));
        }
        // A zero/negative requested size means "use the link maximum".
        final int batchSize = maxSize > 0
            ? maxSize
            : maximumLinkSize;
        return Mono.just(new ServiceBusMessageBatch(isV2, batchSize, link::getErrorContext, tracer, messageSerializer));
    })).onErrorMap(RequestResponseChannelClosedException.class,
        e -> {
            // The channel closed mid-operation; re-wrap with isTransient=true so retries may recover.
            return new AmqpException(true, e.getMessage(), e, null);
        });
    return withRetry(createBatch, retryOptions,
        String.format("entityPath[%s]: Creating batch timed out.", entityName))
        .onErrorMap(this::mapError);
}
/**
 * Sends a scheduled message to the Azure Service Bus entity this sender is connected to, as part of the given
 * transaction. A scheduled message is enqueued and made available to receivers only at the scheduled enqueue time.
 *
 * @param message Message to be sent to the Service Bus Queue.
 * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
 * @param transactionContext to be set on message before sending to Service Bus.
 *
 * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
 *
 * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or
 *     {@code transactionContext.transactionID} is {@code null}.
 * @throws ServiceBusException If the message could not be scheduled.
 * @throws IllegalStateException if sender is already disposed.
 */
public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
    ServiceBusTransactionContext transactionContext) {
    // Guard clauses for the transaction; message/time validation lives in scheduleMessageInternal.
    if (transactionContext == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext);
}
/**
 * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
 * enqueued and made available to receivers only at the scheduled enqueue time.
 *
 * @param message Message to be sent to the Service Bus Queue.
 * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
 *
 * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
 *
 * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}.
 * @throws ServiceBusException If the message could not be scheduled.
 * @throws IllegalStateException if sender is already disposed.
 */
public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime) {
    // Delegates to the internal path with no transaction.
    return scheduleMessageInternal(message, scheduledEnqueueTime, null);
}
/**
 * Sends a batch of scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled
 * message is enqueued and made available to receivers only at the scheduled enqueue time.
 *
 * @param messages Messages to be sent to the Service Bus queue or topic.
 * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
 *
 * @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
 *
 * @throws NullPointerException If {@code messages} or {@code scheduledEnqueueTime} is {@code null}.
 * @throws ServiceBusException If the messages could not be scheduled.
 * @throws IllegalStateException if sender is already disposed.
 */
public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime) {
    // Delegates to the transactional overload with no transaction.
    return scheduleMessages(messages, scheduledEnqueueTime, null);
}
/**
 * Sends a scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled message is
 * enqueued and made available to receivers only at the scheduled enqueue time.
 *
 * @param messages Messages to be sent to the Service Bus Queue.
 * @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus queue or topic.
 * @param transactionContext Transaction to associate with the operation.
 *
 * @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
 *
 * @throws NullPointerException If {@code messages}, {@code scheduledEnqueueTime}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is {@code null}.
 * @throws ServiceBusException If the messages could not be scheduled or the {@code message} is larger than
 *     the maximum size of the {@link ServiceBusMessageBatch}.
 * @throws IllegalStateException if sender is already disposed.
 */
public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime,
    ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessages")));
    }
    if (Objects.isNull(messages)) {
        return fluxError(LOGGER, new NullPointerException("'messages' cannot be null."));
    }
    if (Objects.isNull(scheduledEnqueueTime)) {
        return fluxError(LOGGER, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
    }
    // All messages must fit into one batch; the batch here is only used to enforce the size limit
    // before handing the set to the management node for scheduling.
    return createMessageBatch()
        .map(messageBatch -> {
            int index = 0;
            for (ServiceBusMessage message : messages) {
                if (!messageBatch.tryAddMessage(message)) {
                    final String error = String.format(Locale.US,
                        "Messages exceed max allowed size for all the messages together. "
                            + "Failed to add message at index '%s'.", index);
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException(error));
                }
                ++index;
            }
            return messageBatch;
        })
        .flatMapMany(messageBatch ->
            // Scheduling goes through the entity's management node, not the send link.
            tracer.traceScheduleFlux("ServiceBus.scheduleMessages",
                connectionProcessor
                    .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                    .flatMapMany(managementNode -> managementNode.schedule(messageBatch.getMessages(), scheduledEnqueueTime,
                        messageBatch.getMaxSizeInBytes(), linkName.get(), transactionContext)),
                messageBatch.getMessages())
        ).onErrorMap(this::mapError);
}
/**
 * Cancels the enqueuing of a scheduled message, if it was not already enqueued.
 *
 * @param sequenceNumber of the scheduled message to cancel.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 *
 * @throws IllegalArgumentException if {@code sequenceNumber} is negative.
 * @throws ServiceBusException If the messages could not be cancelled.
 * @throws IllegalStateException if sender is already disposed.
 */
public Mono<Void> cancelScheduledMessage(long sequenceNumber) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessage")));
    }
    if (sequenceNumber < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'sequenceNumber' cannot be negative."));
    }
    // Cancellation goes through the entity's management node. NOTE(review): linkName.get() may be
    // null when no send link has been opened yet — confirm the management node tolerates that.
    return tracer.traceMono("ServiceBus.cancelScheduledMessage",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityName, entityType))
            .flatMap(managementNode -> managementNode.cancelScheduledMessages(
                Collections.singletonList(sequenceNumber), linkName.get())))
        .onErrorMap(this::mapError);
}
/**
 * Cancels the enqueuing of an already scheduled message, if it was not already enqueued.
 *
 * @param sequenceNumbers of the scheduled messages to cancel.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 *
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws IllegalStateException if sender is already disposed.
 * @throws ServiceBusException if the scheduled messages cannot cancelled.
 */
public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessages")));
    }
    if (Objects.isNull(sequenceNumbers)) {
        // Fix: the message previously said "'messages'", which does not match the parameter name
        // validated here (and contradicted the @throws documentation).
        return monoError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null."));
    }
    // Cancellation goes through the entity's management node rather than the send link.
    return tracer.traceMono("ServiceBus.cancelScheduledMessages",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityName, entityType))
            .flatMap(managementNode -> managementNode.cancelScheduledMessages(sequenceNumbers, linkName.get())))
        .onErrorMap(this::mapError);
}
/**
 * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with
 * {@link ServiceBusReceivedMessage} all operations that needs to be in this transaction.
 *
 * @return A new {@link ServiceBusTransactionContext}.
 *
 * @throws IllegalStateException if sender is already disposed.
 * @throws ServiceBusException if a transaction cannot be created.
 *
 * @see ServiceBusReceiverAsyncClient
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "createTransaction")));
    }
    // Transactions run over a dedicated "coordinator" session on the same connection.
    return tracer.traceMono("ServiceBus.createTransaction",
        connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.createTransaction())
            .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())))
        .onErrorMap(this::mapError);
}
/**
 * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 *
 * @param transactionContext to be committed.
 *
 * @return The {@link Mono} that finishes this operation on Service Bus resource.
 *
 * @throws IllegalStateException if sender is already disposed.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 * @throws ServiceBusException if the transaction could not be committed.
 *
 * @see ServiceBusReceiverAsyncClient
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "commitTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Commit is issued on the same "coordinator" session that created the transaction.
    return
        tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
                transactionContext.getTransactionId()))))
        .onErrorMap(this::mapError);
}
/**
 * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 *
 * @param transactionContext Transaction to rollback.
 *
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 *
 * @throws IllegalStateException if sender is already disposed.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 * @throws ServiceBusException if the transaction could not be rolled back.
 *
 * @see ServiceBusReceiverAsyncClient
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Rollback is issued on the same "coordinator" session that created the transaction.
    return tracer.traceMono("ServiceBus.rollbackTransaction",
        connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
                transactionContext.getTransactionId()))))
        .onErrorMap(this::mapError);
}
/**
 * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client has a dedicated connection, the underlying
 * connection is also closed.
 */
@Override
public void close() {
    // Only the first close() performs shutdown; later calls are no-ops.
    if (!isDisposed.compareAndSet(false, true)) {
        return;
    }
    onClientClose.run();
}
/**
 * Fills one batch starting with {@code first}, sends it, and recurses with the first message that did not fit.
 * The static {@code END} sentinel signals that the iterator was exhausted and the final batch was sent.
 *
 * @param first First message to place in the next batch (the overflow from the previous batch, or the
 *     initial message).
 * @param messagesItr Remaining messages to send.
 * @param transaction Optional transaction to send under; {@code null} for non-transactional sends.
 * @return A {@link Mono} that completes when every remaining message has been sent.
 */
private Mono<Void> sendNextIterableBatch(ServiceBusMessage first, Iterator<ServiceBusMessage> messagesItr,
    ServiceBusTransactionContext transaction) {
    return this.createMessageBatch().flatMap(batch -> {
        ServiceBusMessage next = first;
        do {
            if (!batch.tryAddMessage(next)) {
                if (next == first) {
                    // The first message does not fit even into an empty batch: it can never be sent.
                    return monoError(LOGGER,
                        new IllegalArgumentException("The message " + first + " is too big to send even in a batch."));
                }
                // Batch is full; send it and hand back the message that overflowed for the next round.
                if (transaction != null) {
                    return this.sendMessages(batch, transaction).then(Mono.just(next));
                } else {
                    return this.sendMessages(batch).then(Mono.just(next));
                }
            }
            if (messagesItr.hasNext()) {
                next = messagesItr.next();
            } else {
                // Input exhausted; send the final (partial) batch and emit the END sentinel.
                if (transaction != null) {
                    return this.sendMessages(batch, transaction).then(Mono.just(END));
                } else {
                    return this.sendMessages(batch).then(Mono.just(END));
                }
            }
        } while (true);
    }).flatMap(missed -> {
        if (missed == END) {
            return Mono.empty();
        } else {
            // Continue with the message that did not fit in the previous batch.
            return sendNextIterableBatch(missed, messagesItr, transaction);
        }
    });
}
/**
 * Validates the inputs and schedules a single message through the entity's management node.
 *
 * @param message Message to schedule.
 * @param scheduledEnqueueTime Time at which the message becomes visible to receivers.
 * @param transactionContext Optional transaction; {@code null} for a non-transactional schedule.
 * @return The sequence number of the scheduled message.
 */
private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
    ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessage")));
    }
    if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    }
    if (Objects.isNull(scheduledEnqueueTime)) {
        return monoError(LOGGER, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
    }
    // The send link is only consulted for its size limit and name; the schedule call itself goes
    // through the management node.
    return tracer.traceScheduleMono("ServiceBus.scheduleMessage",
        getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
            // Non-positive size means the link did not report one; use the client default.
            int maxSize = size > 0
                ? size
                : MAX_MESSAGE_LENGTH_BYTES;
            return connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                .flatMap(managementNode -> managementNode.schedule(Arrays.asList(message), scheduledEnqueueTime,
                    maxSize, link.getLinkName(), transactionContext)
                    .next());
        })),
        message, message.getContext())
        .onErrorMap(this::mapError);
}
/**
 * Sends a message batch to the Azure Service Bus entity this sender is connected to.
 *
 * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
 * @param transactionContext to be set on batch message before sending to Service Bus.
 *
 * @return A {@link Mono} the finishes this operation on service bus resource.
 */
private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessages")));
    }
    if (Objects.isNull(batch)) {
        return monoError(LOGGER, new NullPointerException("'batch' cannot be null."));
    }
    if (batch.getMessages().isEmpty()) {
        // An empty batch is a no-op, not an error.
        // Fix: the log previously said "EventBatch" (Event Hubs terminology) for a Service Bus batch.
        LOGGER.info("Cannot send a message batch that is empty.");
        return Mono.empty();
    }
    LOGGER.atInfo()
        .addKeyValue("batchSize", batch.getCount())
        .log("Sending batch.");
    // Serialize each ServiceBusMessage into a proton-j message, ensuring message annotations exist.
    final List<org.apache.qpid.proton.message.Message> messages = Collections.synchronizedList(new ArrayList<>());
    batch.getMessages().forEach(serviceBusMessage -> {
        final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(serviceBusMessage);
        final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
            ? new MessageAnnotations(new HashMap<>())
            : message.getMessageAnnotations();
        message.setMessageAnnotations(messageAnnotations);
        messages.add(message);
    });
    final Mono<Void> sendMessage = getSendLink().flatMap(link -> {
        if (transactionContext != null && transactionContext.getTransactionId() != null) {
            // Transactional send: attach the transaction id as the delivery's transactional state.
            final TransactionalState deliveryState = new TransactionalState();
            deliveryState.setTxnId(Binary.create(transactionContext.getTransactionId()));
            return messages.size() == 1
                ? link.send(messages.get(0), deliveryState)
                : link.send(messages, deliveryState);
        } else {
            return messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages);
        }
    }).onErrorMap(RequestResponseChannelClosedException.class,
        e -> {
            // Channel closed mid-operation; re-wrap with isTransient=true so retries may recover.
            return new AmqpException(true, e.getMessage(), e, null);
        });
    final Mono<Void> sendWithRetry = withRetry(sendMessage, retryOptions,
        String.format("entityPath[%s], messages-count[%s]: Sending messages timed out.", entityName, batch.getCount()))
        .onErrorMap(this::mapError);
    return instrumentation.instrumentSendBatch("ServiceBus.send", sendWithRetry, batch.getMessages());
}
/**
 * Collects the message stream into size-bounded batches and sends each batch.
 *
 * @param messages Stream of messages to send.
 * @param transactionContext Optional transaction; {@code null} for non-transactional sends.
 * @return A {@link Mono} that completes when all collected batches have been sent.
 */
private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage")));
    }
    return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName)
        .flatMap(link -> link.getLinkSize()
            .flatMap(size -> {
                // Non-positive size means the link did not report a limit; use the client default.
                final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                final CreateMessageBatchOptions batchOptions = new CreateMessageBatchOptions()
                    .setMaximumSizeInBytes(batchSize);
                // maxNumberOfBatches = 1: overflowing into a second batch raises
                // LINK_PAYLOAD_SIZE_EXCEEDED inside AmqpMessageCollector.
                return messages.collect(new AmqpMessageCollector(isV2, batchOptions, 1,
                    link::getErrorContext, tracer, messageSerializer));
            })
            .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext)))
        .onErrorMap(this::mapError);
}
/**
 * Sends each batch in the stream and completes when all have been sent; errors are logged and propagated.
 *
 * @param eventBatches Batches to send.
 * @param transactionContext Optional transaction; {@code null} for non-transactional sends.
 * @return A {@link Mono} that completes when every batch has been sent.
 */
private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches,
    ServiceBusTransactionContext transactionContext) {
    return eventBatches
        .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext))
        .then()
        .doOnError(error -> LOGGER.error("Error sending batch.", error));
}
/**
 * Obtains the send link for this entity from the current connection. When a "via" entity is configured,
 * the link targets the via entity and carries the final destination as the transfer entity.
 * The first observed link name is remembered for use by management operations (schedule/cancel).
 *
 * @return A {@link Mono} emitting the send link.
 */
private Mono<AmqpSendLink> getSendLink() {
    return connectionProcessor
        .flatMap(connection -> {
            if (!CoreUtils.isNullOrEmpty(viaEntityName)) {
                return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions,
                    entityName, identifier);
            } else {
                return connection.createSendLink(entityName, entityName, retryOptions, null, identifier);
            }
        })
        // Record the first link name only; compareAndSet leaves an existing value untouched.
        .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName()));
}
/**
 * Normalizes an error so callers always observe a {@link ServiceBusException} attributed to the
 * SEND operation; errors that are already {@link ServiceBusException} pass through unchanged.
 *
 * @param throwable Error to normalize.
 * @return The original error, or a wrapping {@link ServiceBusException}.
 */
private Throwable mapError(Throwable throwable) {
    return throwable instanceof ServiceBusException
        ? throwable
        : new ServiceBusException(throwable, ServiceBusErrorSource.SEND);
}
/**
 * Collector that partitions a stream of {@link ServiceBusMessage} into size-bounded
 * {@link ServiceBusMessageBatch} instances, optionally capping the total number of batches.
 */
private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>,
    List<ServiceBusMessageBatch>> {
    // Upper bound (bytes) for each produced batch.
    private final int maxMessageSize;
    // Maximum number of batches to produce; null means unlimited.
    private final Integer maxNumberOfBatches;
    private final ErrorContextProvider contextProvider;
    private final ServiceBusTracer tracer;
    private final MessageSerializer serializer;
    private final boolean isV2;
    // Batch currently being filled. NOTE(review): volatile gives visibility across threads, but
    // accumulation is not atomic — this collector assumes sequential (non-concurrent) collection.
    private volatile ServiceBusMessageBatch currentBatch;
    AmqpMessageCollector(boolean isV2, CreateMessageBatchOptions options, Integer maxNumberOfBatches,
        ErrorContextProvider contextProvider, ServiceBusTracer tracer, MessageSerializer serializer) {
        this.maxNumberOfBatches = maxNumberOfBatches;
        // Non-positive option means "use the client default size".
        this.maxMessageSize = options.getMaximumSizeInBytes() > 0
            ? options.getMaximumSizeInBytes()
            : MAX_MESSAGE_LENGTH_BYTES;
        this.contextProvider = contextProvider;
        this.tracer = tracer;
        this.serializer = serializer;
        this.isV2 = isV2;
        currentBatch = new ServiceBusMessageBatch(isV2, maxMessageSize, contextProvider, tracer, serializer);
    }
    @Override
    public Supplier<List<ServiceBusMessageBatch>> supplier() {
        return ArrayList::new;
    }
    @Override
    public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() {
        return (list, event) -> {
            ServiceBusMessageBatch batch = currentBatch;
            if (batch.tryAddMessage(event)) {
                return;
            }
            // Current batch is full; fail if starting another batch would exceed the cap.
            if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                final String message = String.format(Locale.US,
                    "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches);
                throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                    contextProvider.getErrorContext());
            }
            // Seal the full batch into the list and start a new one with the overflowing message.
            currentBatch = new ServiceBusMessageBatch(isV2, maxMessageSize, contextProvider, tracer, serializer);
            currentBatch.tryAddMessage(event);
            list.add(batch);
        };
    }
    @Override
    public BinaryOperator<List<ServiceBusMessageBatch>> combiner() {
        return (existing, another) -> {
            existing.addAll(another);
            return existing;
        };
    }
    @Override
    public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() {
        return list -> {
            // Flush the in-progress batch, if any, into the result.
            ServiceBusMessageBatch batch = currentBatch;
            currentBatch = null;
            if (batch != null) {
                list.add(batch);
            }
            return list;
        };
    }
    @Override
    public Set<Characteristics> characteristics() {
        return Collections.emptySet();
    }
}
}
private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) {
if (Objects.isNull(messages)) {
return monoError(LOGGER, new NullPointerException("'messages' cannot be null."));
}
final Iterator<ServiceBusMessage> messagesItr = messages.iterator();
if (messagesItr.hasNext()) {
return sendNextIterableBatch(messagesItr.next(), messagesItr, transaction);
} else {
return Mono.empty();
}
}
private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) {
if (Objects.isNull(messages)) {
return monoError(LOGGER, new NullPointerException("'messages' cannot be null."));
}
final Iterator<ServiceBusMessage> messagesItr = messages.iterator();
if (messagesItr.hasNext()) {
return sendNextIterableBatch(messagesItr.next(), messagesItr, transaction);
} else {
return Mono.empty();
}
}
class ServiceBusSenderAsyncClient implements AutoCloseable {
/**
 * The default maximum allowable size, in bytes, for a batch to be sent.
 */
static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024;
// Name of the session used for transaction coordination (create/commit/rollback).
private static final String TRANSACTION_LINK_NAME = "coordinator";
// Sentinel emitted by the iterable-send path to signal "iterator exhausted, final batch sent".
private static final ServiceBusMessage END = new ServiceBusMessage(new byte[0]);
// Shared unset options; batch size then falls back to the link maximum.
private static final CreateMessageBatchOptions DEFAULT_BATCH_OPTIONS = new CreateMessageBatchOptions();
private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSenderAsyncClient.class);
// First send link name observed; reused by management operations (schedule/cancel).
private final AtomicReference<String> linkName = new AtomicReference<>();
// Set exactly once by close(); guards all public operations.
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final MessageSerializer messageSerializer;
private final AmqpRetryOptions retryOptions;
private final AmqpRetryPolicy retryPolicy;
private final MessagingEntityType entityType;
// Invoked once on close() so the owning builder/client can release shared resources.
private final Runnable onClientClose;
private final String entityName;
private final Mono<ServiceBusAmqpConnection> connectionProcessor;
private final String fullyQualifiedNamespace;
// Intermediate "via" entity for transfer sends; may be null or empty when not used.
private final String viaEntityName;
private final String identifier;
private final ServiceBusSenderInstrumentation instrumentation;
private final ServiceBusTracer tracer;
private final boolean isV2;
/**
 * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity.
 *
 * @param entityName Name of the queue or topic to send messages to.
 * @param entityType Type of the entity (queue or topic/subscription).
 * @param connectionCacheWrapper Source of the shared AMQP connection.
 * @param retryOptions Retry options applied to send and management operations.
 * @param instrumentation Tracing/metrics instrumentation for send operations.
 * @param messageSerializer Serializer converting {@link ServiceBusMessage} to AMQP messages.
 * @param onClientClose Callback invoked when this client is closed.
 * @param viaEntityName Intermediate "via" entity for transfer sends; may be {@code null}.
 * @param identifier Client identifier.
 * @throws NullPointerException if a required argument is {@code null}.
 */
ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType,
    ConnectionCacheWrapper connectionCacheWrapper, AmqpRetryOptions retryOptions, ServiceBusSenderInstrumentation instrumentation,
    MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName, String identifier) {
    // Validate required collaborators eagerly so construction fails fast.
    this.messageSerializer = Objects.requireNonNull(messageSerializer,
        "'messageSerializer' cannot be null.");
    this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
    this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null.");
    Objects.requireNonNull(connectionCacheWrapper, "'connectionCacheWrapper' cannot be null.");
    this.connectionProcessor = connectionCacheWrapper.getConnection();
    this.fullyQualifiedNamespace = connectionCacheWrapper.getFullyQualifiedNamespace();
    this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null.");
    this.tracer = instrumentation.getTracer();
    this.retryPolicy = getRetryPolicy(retryOptions);
    this.entityType = entityType;
    this.viaEntityName = viaEntityName;
    this.onClientClose = onClientClose;
    this.identifier = identifier;
    this.isV2 = connectionCacheWrapper.isV2();
}
/**
 * Gets the fully qualified namespace of the Service Bus resource this sender is connected to.
 *
 * @return The fully qualified namespace.
 */
public String getFullyQualifiedNamespace() {
    return fullyQualifiedNamespace;
}
/**
 * Gets the name of the Service Bus resource (queue or topic) this sender sends to.
 *
 * @return The name of the Service Bus resource.
 */
public String getEntityPath() {
    return entityName;
}
/**
 * Gets the identifier of the instance of {@link ServiceBusSenderAsyncClient}.
 *
 * @return The identifier that can identify the instance of {@link ServiceBusSenderAsyncClient}.
 */
public String getIdentifier() {
    return identifier;
}
/**
 * Sends a single message to a Service Bus queue or topic.
 *
 * @param message Message to be sent to Service Bus queue or topic.
 *
 * @return The {@link Mono} the finishes this operation on service bus resource.
 *
 * @throws NullPointerException if {@code message} is {@code null}.
 * @throws IllegalStateException if sender is already disposed.
 * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or
 *     the message could not be sent.
 */
public Mono<Void> sendMessage(ServiceBusMessage message) {
    return message == null
        ? monoError(LOGGER, new NullPointerException("'message' cannot be null."))
        : sendInternal(Flux.just(message), null);
}
/**
 * Sends a message to a Service Bus queue or topic.
 *
 * @param message Message to be sent to Service Bus queue or topic.
 * @param transactionContext to be set on batch message before sending to Service Bus.
 *
 * @return The {@link Mono} the finishes this operation on service bus resource.
 *
 * @throws NullPointerException if {@code message}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is {@code null}.
 * @throws IllegalStateException if sender is already disposed.
 * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or
 *     the message could not be sent.
 */
public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) {
    if (Objects.isNull(transactionContext)) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    if (Objects.isNull(message)) {
        // Fix: previously missing. Flux.just(null) throws NullPointerException eagerly instead of
        // returning a Mono error, contradicting the documented contract and the sibling overload.
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    }
    return sendInternal(Flux.just(message), transactionContext);
}
/**
 * Sends a set of messages to a Service Bus queue or topic using a batched approach, as part of the given
 * transaction. If the size of messages exceed the maximum size of a single batch, an exception will be triggered
 * and the send will fail. By default, the message size is the max amount allowed on the link.
 *
 * @param messages Messages to be sent to Service Bus queue or topic.
 * @param transactionContext to be set on batch message before sending to Service Bus.
 *
 * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
 *
 * @throws NullPointerException if {@code batch}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is {@code null}.
 * @throws IllegalStateException if sender is already disposed.
 * @throws ServiceBusException if the message could not be sent or {@code message} is larger than the maximum size
 *     of the {@link ServiceBusMessageBatch}.
 */
public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages,
    ServiceBusTransactionContext transactionContext) {
    // Guard clauses for the transaction; message validation happens inside sendIterable.
    if (transactionContext == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return sendIterable(messages, transactionContext);
}
/**
 * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages exceed
 * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
 * message size is the max amount allowed on the link.
 *
 * @param messages Messages to be sent to Service Bus queue or topic.
 *
 * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
 *
 * @throws NullPointerException if {@code messages} is {@code null}.
 * @throws ServiceBusException if the message could not be sent or {@code message} is larger than the maximum size of the {@link
 *     ServiceBusMessageBatch}.
 * @throws IllegalStateException if sender is already disposed.
 */
public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) {
    // Delegate to the shared iterable-send path; null transaction means non-transactional send.
    return sendIterable(messages, null);
}
    /**
     * Sends a message batch to the Azure Service Bus entity this sender is connected to.
     *
     * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
     *
     * @return A {@link Mono} the finishes this operation on service bus resource.
     *
     * @throws NullPointerException if {@code batch} is {@code null}.
     * @throws ServiceBusException if the message batch could not be sent.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<Void> sendMessages(ServiceBusMessageBatch batch) {
        // A null transaction context means the batch is sent outside of any transaction.
        return sendInternal(batch, null);
    }
/**
* Sends a message batch to the Azure Service Bus entity this sender is connected to.
*
* @param batch of messages which allows client to send maximum allowed size for a batch of messages.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return A {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code batch}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws ServiceBusException if the message batch could not be sent.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return sendInternal(batch, transactionContext);
}
    /**
     * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
     *
     * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
     * @throws ServiceBusException if the message batch could not be created.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<ServiceBusMessageBatch> createMessageBatch() {
        // DEFAULT_BATCH_OPTIONS carries no explicit maximum, so the batch is sized to the link's maximum.
        return createMessageBatch(DEFAULT_BATCH_OPTIONS);
    }
    /**
     * Creates an {@link ServiceBusMessageBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link ServiceBusMessageBatch}.
     *
     * @return A new {@link ServiceBusMessageBatch} configured with the given options.
     * @throws NullPointerException if {@code options} is null.
     * @throws ServiceBusException if the message batch could not be created.
     * @throws IllegalStateException if sender is already disposed.
     * @throws IllegalArgumentException if {@link CreateMessageBatchOptions#getMaximumSizeInBytes()} is larger than the
     * maximum allowed size.
     */
    public Mono<ServiceBusMessageBatch> createMessageBatch(CreateMessageBatchOptions options) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "createMessageBatch")));
        }
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        }
        final int maxSize = options.getMaximumSizeInBytes();
        // The actual link size is only known once the send link is active, hence the reactive chain.
        final Mono<ServiceBusMessageBatch> createBatch = getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
            // A non-positive size means the link did not report one; fall back to the default maximum.
            final int maximumLinkSize = size > 0
                ? size
                : MAX_MESSAGE_LENGTH_BYTES;
            if (maxSize > maximumLinkSize) {
                return monoError(LOGGER, new IllegalArgumentException(String.format(Locale.US,
                    "CreateMessageBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size"
                        + " (%s bytes).", maxSize, maximumLinkSize)));
            }
            // An unset (non-positive) requested size also falls back to the link's maximum.
            final int batchSize = maxSize > 0
                ? maxSize
                : maximumLinkSize;
            return Mono.just(new ServiceBusMessageBatch(isV2, batchSize, link::getErrorContext, tracer, messageSerializer));
        })).onErrorMap(RequestResponseChannelClosedException.class,
            e -> {
                // A closed request/response channel is transient; mark the AmqpException retriable.
                return new AmqpException(true, e.getMessage(), e, null);
            });
        return withRetry(createBatch, retryOptions,
            String.format("entityPath[%s]: Creating batch timed out.", entityName))
            .onErrorMap(this::mapError);
    }
/**
* Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
* enqueued and made available to receivers only at the scheduled enqueue time.
*
* @param message Message to be sent to the Service Bus Queue.
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
* @param transactionContext to be set on message before sending to Service Bus.
*
* @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
*
* @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or
* {@code transactionContext.transactionID} is {@code null}.
* @throws ServiceBusException If the message could not be scheduled.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext);
}
    /**
     * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
     * enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param message Message to be sent to the Service Bus Queue.
     * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
     *
     * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
     *
     * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}.
     * @throws ServiceBusException If the message could not be scheduled.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime) {
        // A null transaction context means the message is scheduled outside of any transaction.
        return scheduleMessageInternal(message, scheduledEnqueueTime, null);
    }
    /**
     * Sends a batch of scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled
     * message is enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param messages Messages to be sent to the Service Bus queue or topic.
     * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
     *
     * @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
     *
     * @throws NullPointerException If {@code messages} or {@code scheduledEnqueueTime} is {@code null}.
     * @throws ServiceBusException If the messages could not be scheduled.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime) {
        // A null transaction context means the messages are scheduled outside of any transaction.
        return scheduleMessages(messages, scheduledEnqueueTime, null);
    }
    /**
     * Sends a scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled message is
     * enqueued and made available to receivers only at the scheduled enqueue time.
     *
     * @param messages Messages to be sent to the Service Bus Queue.
     * @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus queue or topic.
     * @param transactionContext Transaction to associate with the operation.
     *
     * @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
     *
     * @throws NullPointerException If {@code messages}, {@code scheduledEnqueueTime}, {@code transactionContext} or
     * {@code transactionContext.transactionId} is {@code null}.
     * @throws ServiceBusException If the messages could not be scheduled or the {@code message} is larger than
     * the maximum size of the {@link ServiceBusMessageBatch}.
     * @throws IllegalStateException if sender is already disposed.
     */
    public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessages")));
        }
        if (Objects.isNull(messages)) {
            return fluxError(LOGGER, new NullPointerException("'messages' cannot be null."));
        }
        if (Objects.isNull(scheduledEnqueueTime)) {
            return fluxError(LOGGER, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
        }
        // All messages must fit into a single batch; the first one that does not fit fails the whole operation.
        return createMessageBatch()
            .map(messageBatch -> {
                int index = 0;
                for (ServiceBusMessage message : messages) {
                    if (!messageBatch.tryAddMessage(message)) {
                        final String error = String.format(Locale.US,
                            "Messages exceed max allowed size for all the messages together. "
                                + "Failed to add message at index '%s'.", index);
                        throw LOGGER.logExceptionAsError(new IllegalArgumentException(error));
                    }
                    ++index;
                }
                return messageBatch;
            })
            // Schedule the whole batch through the management node under the (optional) transaction.
            .flatMapMany(messageBatch ->
                tracer.traceScheduleFlux("ServiceBus.scheduleMessages",
                    connectionProcessor
                        .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                        .flatMapMany(managementNode -> managementNode.schedule(messageBatch.getMessages(), scheduledEnqueueTime,
                            messageBatch.getMaxSizeInBytes(), linkName.get(), transactionContext)),
                    messageBatch.getMessages())
            ).onErrorMap(this::mapError);
    }
/**
* Cancels the enqueuing of a scheduled message, if it was not already enqueued.
*
* @param sequenceNumber of the scheduled message to cancel.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*
* @throws IllegalArgumentException if {@code sequenceNumber} is negative.
* @throws ServiceBusException If the messages could not be cancelled.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> cancelScheduledMessage(long sequenceNumber) {
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessage")));
}
if (sequenceNumber < 0) {
return monoError(LOGGER, new IllegalArgumentException("'sequenceNumber' cannot be negative."));
}
return tracer.traceMono("ServiceBus.cancelScheduledMessage",
connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityName, entityType))
.flatMap(managementNode -> managementNode.cancelScheduledMessages(
Collections.singletonList(sequenceNumber), linkName.get())))
.onErrorMap(this::mapError);
}
/**
* Cancels the enqueuing of an already scheduled message, if it was not already enqueued.
*
* @param sequenceNumbers of the scheduled messages to cancel.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code sequenceNumbers} is null.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if the scheduled messages cannot cancelled.
*/
public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers) {
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessages")));
}
if (Objects.isNull(sequenceNumbers)) {
return monoError(LOGGER, new NullPointerException("'messages' cannot be null."));
}
return tracer.traceMono("ServiceBus.cancelScheduledMessages",
connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityName, entityType))
.flatMap(managementNode -> managementNode.cancelScheduledMessages(sequenceNumbers, linkName.get())))
.onErrorMap(this::mapError);
}
    /**
     * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with
     * {@link ServiceBusReceivedMessage} all operations that needs to be in this transaction.
     *
     * @return A new {@link ServiceBusTransactionContext}.
     *
     * @throws IllegalStateException if sender is already disposed.
     * @throws ServiceBusException if a transaction cannot be created.
     *
     * @see ServiceBusReceiverAsyncClient
     */
    public Mono<ServiceBusTransactionContext> createTransaction() {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "createTransaction")));
        }
        // Transactions are coordinated over a dedicated session named by TRANSACTION_LINK_NAME.
        return tracer.traceMono("ServiceBus.createTransaction",
            connectionProcessor
                .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
                .flatMap(transactionSession -> transactionSession.createTransaction())
                .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())))
            .onErrorMap(this::mapError);
    }
/**
* Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
*
* @param transactionContext to be committed.
*
* @return The {@link Mono} that finishes this operation on Service Bus resource.
*
* @throws IllegalStateException if sender is already disposed.
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
* @throws ServiceBusException if the transaction could not be committed.
*
* @see ServiceBusReceiverAsyncClient
*/
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "commitTransaction")));
}
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return
tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor
.flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
.flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
transactionContext.getTransactionId()))))
.onErrorMap(this::mapError);
}
    /**
     * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
     *
     * @param transactionContext Transaction to rollback.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws IllegalStateException if sender is already disposed.
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
     * @throws ServiceBusException if the transaction could not be rolled back.
     *
     * @see ServiceBusReceiverAsyncClient
     */
    public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "rollbackTransaction")));
        }
        if (Objects.isNull(transactionContext)) {
            return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
        }
        if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }
        // Rollback happens over the coordinator session using the AMQP transaction id, mirroring commitTransaction.
        return tracer.traceMono("ServiceBus.rollbackTransaction",
            connectionProcessor
                .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
                .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
                    transactionContext.getTransactionId()))))
            .onErrorMap(this::mapError);
    }
/**
* Disposes of the {@link ServiceBusSenderAsyncClient}. If the client has a dedicated connection, the underlying
* connection is also closed.
*/
@Override
public void close() {
if (isDisposed.getAndSet(true)) {
return;
}
onClientClose.run();
}
    /**
     * Fills a batch starting with {@code first}, sends it, and recurses with the first message that did not fit.
     * The {@code END} sentinel signals that the iterator was exhausted and recursion should stop.
     *
     * @param first First message to place into the next batch (either the overall first, or the one that overflowed
     *     the previous batch).
     * @param messagesItr Remaining messages to send.
     * @param transaction Optional transaction; {@code null} sends outside of any transaction.
     */
    private Mono<Void> sendNextIterableBatch(ServiceBusMessage first, Iterator<ServiceBusMessage> messagesItr,
        ServiceBusTransactionContext transaction) {
        return this.createMessageBatch().flatMap(batch -> {
            ServiceBusMessage next = first;
            do {
                if (!batch.tryAddMessage(next)) {
                    // A message that doesn't fit into a brand-new batch can never be sent.
                    if (next == first) {
                        return monoError(LOGGER,
                            new IllegalArgumentException("The message " + first + " is too big to send even in a batch."));
                    }
                    // Batch is full: send it and hand the overflowing message to the next recursion.
                    if (transaction != null) {
                        return this.sendMessages(batch, transaction).then(Mono.just(next));
                    } else {
                        return this.sendMessages(batch).then(Mono.just(next));
                    }
                }
                if (messagesItr.hasNext()) {
                    next = messagesItr.next();
                } else {
                    // No more messages: send the final batch and emit the END sentinel.
                    if (transaction != null) {
                        return this.sendMessages(batch, transaction).then(Mono.just(END));
                    } else {
                        return this.sendMessages(batch).then(Mono.just(END));
                    }
                }
            } while (true);
        }).flatMap(missed -> {
            if (missed == END) {
                return Mono.empty();
            } else {
                // The emitted message overflowed the last batch; start a new batch with it.
                return sendNextIterableBatch(missed, messagesItr, transaction);
            }
        });
    }
    /**
     * Shared path for scheduling a single message, optionally within a transaction.
     *
     * @param message Message to schedule; must not be {@code null}.
     * @param scheduledEnqueueTime Time at which the message becomes visible; must not be {@code null}.
     * @param transactionContext Optional transaction; {@code null} schedules outside of any transaction.
     * @return The sequence number of the scheduled message.
     */
    private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessage")));
        }
        if (Objects.isNull(message)) {
            return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
        }
        if (Objects.isNull(scheduledEnqueueTime)) {
            return monoError(LOGGER, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
        }
        return tracer.traceScheduleMono("ServiceBus.scheduleMessage",
            getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
                // A non-positive reported link size falls back to the default maximum message length.
                int maxSize = size > 0
                    ? size
                    : MAX_MESSAGE_LENGTH_BYTES;
                return connectionProcessor
                    .flatMap(connection -> connection.getManagementNode(entityName, entityType))
                    .flatMap(managementNode -> managementNode.schedule(Arrays.asList(message), scheduledEnqueueTime,
                        maxSize, link.getLinkName(), transactionContext)
                        .next());
            })),
            message, message.getContext())
            .onErrorMap(this::mapError);
    }
    /**
     * Sends a message batch to the Azure Service Bus entity this sender is connected to.
     * @param batch of messages which allows client to send maximum allowed size for a batch of messages.
     * @param transactionContext to be set on batch message before sending to Service Bus.
     *
     * @return A {@link Mono} the finishes this operation on service bus resource.
     */
    private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessages")));
        }
        if (Objects.isNull(batch)) {
            return monoError(LOGGER, new NullPointerException("'batch' cannot be null."));
        }
        // An empty batch completes immediately without a network call.
        if (batch.getMessages().isEmpty()) {
            LOGGER.info("Cannot send an EventBatch that is empty.");
            return Mono.empty();
        }
        LOGGER.atInfo()
            .addKeyValue("batchSize", batch.getCount())
            .log("Sending batch.");
        // Serialize each message into its AMQP representation, ensuring message annotations are present.
        final List<org.apache.qpid.proton.message.Message> messages = Collections.synchronizedList(new ArrayList<>());
        batch.getMessages().forEach(serviceBusMessage -> {
            final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(serviceBusMessage);
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();
            message.setMessageAnnotations(messageAnnotations);
            messages.add(message);
        });
        final Mono<Void> sendMessage = getSendLink().flatMap(link -> {
            if (transactionContext != null && transactionContext.getTransactionId() != null) {
                // A transactional send carries the transaction id as the AMQP delivery state.
                final TransactionalState deliveryState = new TransactionalState();
                deliveryState.setTxnId(Binary.create(transactionContext.getTransactionId()));
                return messages.size() == 1
                    ? link.send(messages.get(0), deliveryState)
                    : link.send(messages, deliveryState);
            } else {
                return messages.size() == 1
                    ? link.send(messages.get(0))
                    : link.send(messages);
            }
        }).onErrorMap(RequestResponseChannelClosedException.class,
            e -> {
                // A closed request/response channel is transient; mark the AmqpException retriable.
                return new AmqpException(true, e.getMessage(), e, null);
            });
        final Mono<Void> sendWithRetry = withRetry(sendMessage, retryOptions,
            String.format("entityPath[%s], messages-count[%s]: Sending messages timed out.", entityName, batch.getCount()))
            .onErrorMap(this::mapError);
        return instrumentation.instrumentSendBatch("ServiceBus.send", sendWithRetry, batch.getMessages());
    }
    /**
     * Collects the flux of messages into batches sized to the active link and sends them.
     * Uses a single-batch collector (maxNumberOfBatches = 1), so messages that do not fit into one batch fail the
     * send with a link-payload-size error raised by {@link AmqpMessageCollector}.
     */
    private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage")));
        }
        return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // A non-positive reported link size falls back to the default maximum message length.
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateMessageBatchOptions batchOptions = new CreateMessageBatchOptions()
                        .setMaximumSizeInBytes(batchSize);
                    return messages.collect(new AmqpMessageCollector(isV2, batchOptions, 1,
                        link::getErrorContext, tracer, messageSerializer));
                })
                .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext)))
            .onErrorMap(this::mapError);
    }
private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches,
ServiceBusTransactionContext transactionContext) {
return eventBatches
.flatMap(messageBatch -> sendInternal(messageBatch, transactionContext))
.then()
.doOnError(error -> LOGGER.error("Error sending batch.", error));
}
private Mono<AmqpSendLink> getSendLink() {
return connectionProcessor
.flatMap(connection -> {
if (!CoreUtils.isNullOrEmpty(viaEntityName)) {
return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions,
entityName, identifier);
} else {
return connection.createSendLink(entityName, entityName, retryOptions, null, identifier);
}
})
.doOnNext(next -> linkName.compareAndSet(null, next.getLinkName()));
}
private Throwable mapError(Throwable throwable) {
if (!(throwable instanceof ServiceBusException)) {
return new ServiceBusException(throwable, ServiceBusErrorSource.SEND);
}
return throwable;
}
    /**
     * A {@link Collector} that accumulates {@link ServiceBusMessage}s into size-bounded
     * {@link ServiceBusMessageBatch}es, optionally capping the number of batches produced.
     * NOTE(review): {@code currentBatch} is mutated during accumulation; the collector reports no
     * CONCURRENT characteristic, so it appears intended for sequential (non-parallel) collection — confirm.
     */
    private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>,
        List<ServiceBusMessageBatch>> {
        // Maximum size, in bytes, of a single batch.
        private final int maxMessageSize;
        // Maximum number of batches to produce; null means unbounded.
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final ServiceBusTracer tracer;
        private final MessageSerializer serializer;
        private final boolean isV2;
        // Batch currently being filled; replaced when full, nulled out by the finisher.
        private volatile ServiceBusMessageBatch currentBatch;
        AmqpMessageCollector(boolean isV2, CreateMessageBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, ServiceBusTracer tracer, MessageSerializer serializer) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            // A non-positive configured maximum falls back to the default maximum message length.
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.contextProvider = contextProvider;
            this.tracer = tracer;
            this.serializer = serializer;
            this.isV2 = isV2;
            currentBatch = new ServiceBusMessageBatch(isV2, maxMessageSize, contextProvider, tracer, serializer);
        }
        @Override
        public Supplier<List<ServiceBusMessageBatch>> supplier() {
            return ArrayList::new;
        }
        @Override
        public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() {
            return (list, event) -> {
                ServiceBusMessageBatch batch = currentBatch;
                if (batch.tryAddMessage(event)) {
                    return;
                }
                // The message did not fit; fail if starting another batch would exceed the configured cap.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }
                // Seal the full batch and start a new one with the overflowing message.
                currentBatch = new ServiceBusMessageBatch(isV2, maxMessageSize, contextProvider, tracer, serializer);
                currentBatch.tryAddMessage(event);
                list.add(batch);
            };
        }
        @Override
        public BinaryOperator<List<ServiceBusMessageBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }
        @Override
        public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() {
            return list -> {
                // Flush the in-progress batch, if any, into the result.
                ServiceBusMessageBatch batch = currentBatch;
                currentBatch = null;
                if (batch != null) {
                    list.add(batch);
                }
                return list;
            };
        }
        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
} | class ServiceBusSenderAsyncClient implements AutoCloseable {
    /**
     * The default maximum allowable size, in bytes, for a batch to be sent.
     */
    static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024;
    // Name of the session used to coordinate AMQP transactions.
    private static final String TRANSACTION_LINK_NAME = "coordinator";
    // Sentinel emitted by sendNextIterableBatch to signal the iterator is exhausted; compared by identity.
    private static final ServiceBusMessage END = new ServiceBusMessage(new byte[0]);
    private static final CreateMessageBatchOptions DEFAULT_BATCH_OPTIONS = new CreateMessageBatchOptions();
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSenderAsyncClient.class);
    // Name of the first send link created; set once via compareAndSet in getSendLink.
    private final AtomicReference<String> linkName = new AtomicReference<>();
    // Flipped exactly once in close(); guards every public operation.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final MessageSerializer messageSerializer;
    private final AmqpRetryOptions retryOptions;
    private final AmqpRetryPolicy retryPolicy;
    private final MessagingEntityType entityType;
    // Callback invoked on the first close() call.
    private final Runnable onClientClose;
    private final String entityName;
    private final Mono<ServiceBusAmqpConnection> connectionProcessor;
    private final String fullyQualifiedNamespace;
    // When non-empty, sends are routed through this intermediate ("via") entity.
    private final String viaEntityName;
    private final String identifier;
    private final ServiceBusSenderInstrumentation instrumentation;
    private final ServiceBusTracer tracer;
    private final boolean isV2;
    /**
     * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity.
     */
    ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType,
        ConnectionCacheWrapper connectionCacheWrapper, AmqpRetryOptions retryOptions, ServiceBusSenderInstrumentation instrumentation,
        MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName, String identifier) {
        this.messageSerializer = Objects.requireNonNull(messageSerializer,
            "'messageSerializer' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        // NOTE(review): the message says 'entityPath' while the parameter is named entityName — confirm which
        // name callers expect in the diagnostic before changing either.
        this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null.");
        Objects.requireNonNull(connectionCacheWrapper, "'connectionCacheWrapper' cannot be null.");
        this.connectionProcessor = connectionCacheWrapper.getConnection();
        this.fullyQualifiedNamespace = connectionCacheWrapper.getFullyQualifiedNamespace();
        this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null.");
        this.tracer = instrumentation.getTracer();
        this.retryPolicy = getRetryPolicy(retryOptions);
        // Remaining parameters are nullable/optional and stored as-is.
        this.entityType = entityType;
        this.viaEntityName = viaEntityName;
        this.onClientClose = onClientClose;
        this.identifier = identifier;
        this.isV2 = connectionCacheWrapper.isV2();
    }
    /**
     * Gets the fully qualified namespace this sender is connected to.
     *
     * @return The fully qualified namespace.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }
    /**
     * Gets the name of the Service Bus resource (the entity path) this sender sends to.
     *
     * @return The name of the Service Bus resource.
     */
    public String getEntityPath() {
        return entityName;
    }
    /**
     * Gets the identifier of the instance of {@link ServiceBusSenderAsyncClient}.
     *
     * @return The identifier that can identify the instance of {@link ServiceBusSenderAsyncClient}.
     */
    public String getIdentifier() {
        return identifier;
    }
/**
* Sends a message to a Service Bus queue or topic.
*
* @param message Message to be sent to Service Bus queue or topic.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code message} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or
* the message could not be sent.
*/
public Mono<Void> sendMessage(ServiceBusMessage message) {
if (Objects.isNull(message)) {
return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
}
return sendInternal(Flux.just(message), null);
}
/**
* Sends a message to a Service Bus queue or topic.
*
* @param message Message to be sent to Service Bus queue or topic.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code message}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or
* the message could not be sent.
*/
public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return sendInternal(Flux.just(message), transactionContext);
}
/**
* Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages
* exceed the maximum size of a single batch, an exception will be triggered and the send will fail.
* By default, the message size is the max amount allowed on the link.
*
* @param messages Messages to be sent to Service Bus queue or topic.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
*
* @throws NullPointerException if {@code batch}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if the message could not be sent or {@code message} is larger than the maximum size of the {@link
* ServiceBusMessageBatch}.
*/
public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages,
ServiceBusTransactionContext transactionContext) {
if (Objects.isNull(transactionContext)) {
return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
}
if (Objects.isNull(transactionContext.getTransactionId())) {
return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
}
return sendIterable(messages, transactionContext);
}
/**
* Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages exceed
* the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
* message size is the max amount allowed on the link.
*
* @param messages Messages to be sent to Service Bus queue or topic.
*
* @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource.
*
* @throws NullPointerException if {@code messages} is {@code null}.
* @throws ServiceBusException if the message could not be sent or {@code message} is larger than the maximum size of the {@link
* ServiceBusMessageBatch}.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) {
return sendIterable(messages, null);
}
/**
* Sends a message batch to the Azure Service Bus entity this sender is connected to.
*
* @param batch of messages which allows client to send maximum allowed size for a batch of messages.
*
* @return A {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code batch} is {@code null}.
* @throws ServiceBusException if the message batch could not be sent.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> sendMessages(ServiceBusMessageBatch batch) {
// Non-transactional overload: sendInternal performs the null/empty checks on the batch.
return sendInternal(batch, null);
}
/**
* Sends a message batch to the Azure Service Bus entity this sender is connected to.
*
* @param batch of messages which allows client to send maximum allowed size for a batch of messages.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return A {@link Mono} the finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code batch}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws ServiceBusException if the message batch could not be sent.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
    // Guard the transaction arguments; sendInternal validates the batch itself.
    if (transactionContext == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return sendInternal(batch, transactionContext);
}
/**
* Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
*
* @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows.
* @throws ServiceBusException if the message batch could not be created.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<ServiceBusMessageBatch> createMessageBatch() {
// Use the default options, i.e. a batch sized to whatever the AMQP link allows.
return createMessageBatch(DEFAULT_BATCH_OPTIONS);
}
/**
* Creates an {@link ServiceBusMessageBatch} configured with the options specified.
*
* @param options A set of options used to configure the {@link ServiceBusMessageBatch}.
*
* @return A new {@link ServiceBusMessageBatch} configured with the given options.
* @throws NullPointerException if {@code options} is null.
* @throws ServiceBusException if the message batch could not be created.
* @throws IllegalStateException if sender is already disposed.
* @throws IllegalArgumentException if {@link CreateMessageBatchOptions#getMaximumSizeInBytes()} is larger than the
* maximum allowed size.
*/
public Mono<ServiceBusMessageBatch> createMessageBatch(CreateMessageBatchOptions options) {
// Fail fast when the sender has already been closed.
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "createMessageBatch")));
}
if (Objects.isNull(options)) {
return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
}
final int maxSize = options.getMaximumSizeInBytes();
// Ask the send link for its negotiated size; a non-positive answer means "unknown", in which
// case fall back to MAX_MESSAGE_LENGTH_BYTES.
final Mono<ServiceBusMessageBatch> createBatch = getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
final int maximumLinkSize = size > 0
? size
: MAX_MESSAGE_LENGTH_BYTES;
// A caller-requested batch size larger than the link allows could never be sent; reject eagerly.
if (maxSize > maximumLinkSize) {
return monoError(LOGGER, new IllegalArgumentException(String.format(Locale.US,
"CreateMessageBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size"
+ " (%s bytes).", maxSize, maximumLinkSize)));
}
// Caller-specified size wins when set (> 0); otherwise use the link maximum.
final int batchSize = maxSize > 0
? maxSize
: maximumLinkSize;
return Mono.just(new ServiceBusMessageBatch(isV2, batchSize, link::getErrorContext, tracer, messageSerializer));
})).onErrorMap(RequestResponseChannelClosedException.class,
e -> {
// Channel closed mid-operation: re-surface as a retriable (transient) AmqpException.
return new AmqpException(true, e.getMessage(), e, null);
});
// Apply the client retry policy and normalize failures into ServiceBusException via mapError.
return withRetry(createBatch, retryOptions,
String.format("entityPath[%s]: Creating batch timed out.", entityName))
.onErrorMap(this::mapError);
}
/**
* Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
* enqueued and made available to receivers only at the scheduled enqueue time.
*
* @param message Message to be sent to the Service Bus Queue.
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
* @param transactionContext to be set on message before sending to Service Bus.
*
* @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
*
* @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or
* {@code transactionContext.transactionID} is {@code null}.
* @throws ServiceBusException If the message could not be scheduled.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
    ServiceBusTransactionContext transactionContext) {
    // Only the transaction arguments are validated here; message and enqueue-time null
    // checks are performed by scheduleMessageInternal.
    if (transactionContext == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext);
}
/**
* Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is
* enqueued and made available to receivers only at the scheduled enqueue time.
*
* @param message Message to be sent to the Service Bus Queue.
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
*
* @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message.
*
* @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}.
* @throws ServiceBusException If the message could not be scheduled.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime) {
// Non-transactional overload: the internal method validates message and enqueue time.
return scheduleMessageInternal(message, scheduledEnqueueTime, null);
}
/**
* Sends a batch of scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled
* message is enqueued and made available to receivers only at the scheduled enqueue time.
*
* @param messages Messages to be sent to the Service Bus queue or topic.
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic.
*
* @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
*
* @throws NullPointerException If {@code messages} or {@code scheduledEnqueueTime} is {@code null}.
* @throws ServiceBusException If the messages could not be scheduled.
* @throws IllegalStateException if sender is already disposed.
*/
public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime) {
// Non-transactional overload of the three-argument scheduleMessages.
return scheduleMessages(messages, scheduledEnqueueTime, null);
}
/**
* Sends scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled message is
* enqueued and made available to receivers only at the scheduled enqueue time.
*
* @param messages Messages to be sent to the Service Bus Queue.
* @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus queue or topic.
* @param transactionContext Transaction to associate with the operation.
*
* @return Sequence numbers of the scheduled messages which can be used to cancel the messages.
*
* @throws NullPointerException If {@code messages}, {@code scheduledEnqueueTime}, {@code transactionContext} or
* {@code transactionContext.transactionId} is {@code null}.
* @throws ServiceBusException If the messages could not be scheduled or the {@code message} is larger than
* the maximum size of the {@link ServiceBusMessageBatch}.
* @throws IllegalStateException if sender is already disposed.
*/
public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime,
ServiceBusTransactionContext transactionContext) {
// Fail fast when the sender has already been closed.
if (isDisposed.get()) {
return fluxError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessages")));
}
if (Objects.isNull(messages)) {
return fluxError(LOGGER, new NullPointerException("'messages' cannot be null."));
}
if (Objects.isNull(scheduledEnqueueTime)) {
return fluxError(LOGGER, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
}
// All messages must fit in ONE batch; if any message fails to fit, the whole operation
// fails with IllegalArgumentException identifying the first offending index.
return createMessageBatch()
.map(messageBatch -> {
int index = 0;
for (ServiceBusMessage message : messages) {
if (!messageBatch.tryAddMessage(message)) {
final String error = String.format(Locale.US,
"Messages exceed max allowed size for all the messages together. "
+ "Failed to add message at index '%s'.", index);
throw LOGGER.logExceptionAsError(new IllegalArgumentException(error));
}
++index;
}
return messageBatch;
})
// Scheduling goes through the management node, not the send link; the batch's max size
// and the current link name accompany the request.
.flatMapMany(messageBatch ->
tracer.traceScheduleFlux("ServiceBus.scheduleMessages",
connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityName, entityType))
.flatMapMany(managementNode -> managementNode.schedule(messageBatch.getMessages(), scheduledEnqueueTime,
messageBatch.getMaxSizeInBytes(), linkName.get(), transactionContext)),
messageBatch.getMessages())
).onErrorMap(this::mapError);
}
/**
* Cancels the enqueuing of a scheduled message, if it was not already enqueued.
*
* @param sequenceNumber of the scheduled message to cancel.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*
* @throws IllegalArgumentException if {@code sequenceNumber} is negative.
* @throws ServiceBusException If the messages could not be cancelled.
* @throws IllegalStateException if sender is already disposed.
*/
public Mono<Void> cancelScheduledMessage(long sequenceNumber) {
// Fail fast when the sender has already been closed.
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessage")));
}
// Sequence numbers handed out by the service are non-negative.
if (sequenceNumber < 0) {
return monoError(LOGGER, new IllegalArgumentException("'sequenceNumber' cannot be negative."));
}
// Cancellation is a management-node operation; wrap the single number in a list.
return tracer.traceMono("ServiceBus.cancelScheduledMessage",
connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityName, entityType))
.flatMap(managementNode -> managementNode.cancelScheduledMessages(
Collections.singletonList(sequenceNumber), linkName.get())))
.onErrorMap(this::mapError);
}
/**
* Cancels the enqueuing of an already scheduled message, if it was not already enqueued.
*
* @param sequenceNumbers of the scheduled messages to cancel.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code sequenceNumbers} is null.
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if the scheduled messages cannot cancelled.
*/
public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers) {
    // Fail fast when the sender has already been closed.
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessages")));
    }
    if (Objects.isNull(sequenceNumbers)) {
        // Fixed: the message previously said "'messages' cannot be null.", which does not match
        // this method's parameter name and misleads callers diagnosing the NPE.
        return monoError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null."));
    }
    // Cancellation is a management-node operation; the current link name (may be null before
    // the first send) accompanies the request.
    return tracer.traceMono("ServiceBus.cancelScheduledMessages",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityName, entityType))
            .flatMap(managementNode -> managementNode.cancelScheduledMessages(sequenceNumbers, linkName.get())))
        .onErrorMap(this::mapError);
}
/**
* Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with
* {@link ServiceBusReceivedMessage} all operations that needs to be in this transaction.
*
* @return A new {@link ServiceBusTransactionContext}.
*
* @throws IllegalStateException if sender is already disposed.
* @throws ServiceBusException if a transaction cannot be created.
*
* @see ServiceBusReceiverAsyncClient
*/
public Mono<ServiceBusTransactionContext> createTransaction() {
// Fail fast when the sender has already been closed.
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "createTransaction")));
}
// Transactions run over a dedicated session (TRANSACTION_LINK_NAME); the AMQP transaction id
// is wrapped into the public ServiceBusTransactionContext.
return tracer.traceMono("ServiceBus.createTransaction",
connectionProcessor
.flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
.flatMap(transactionSession -> transactionSession.createTransaction())
.map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())))
.onErrorMap(this::mapError);
}
/**
* Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
*
* @param transactionContext to be committed.
*
* @return The {@link Mono} that finishes this operation on Service Bus resource.
*
* @throws IllegalStateException if sender is already disposed.
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
* @throws ServiceBusException if the transaction could not be committed.
*
* @see ServiceBusReceiverAsyncClient
*/
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    // Fail fast when the sender has already been closed.
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "commitTransaction")));
    }
    if (transactionContext == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Commit runs over the dedicated transaction session; errors surface as ServiceBusException.
    final Mono<Void> commit = connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.commitTransaction(
            new AmqpTransaction(transactionContext.getTransactionId())));
    return tracer.traceMono("ServiceBus.commitTransaction", commit)
        .onErrorMap(this::mapError);
}
/**
* Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
*
* @param transactionContext Transaction to rollback.
*
* @return The {@link Mono} that finishes this operation on the Service Bus resource.
*
* @throws IllegalStateException if sender is already disposed.
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
* @throws ServiceBusException if the transaction could not be rolled back.
*
* @see ServiceBusReceiverAsyncClient
*/
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    // Fail fast when the sender has already been closed.
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_SENDER, "rollbackTransaction")));
    }
    if (transactionContext == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Rollback runs over the dedicated transaction session; errors surface as ServiceBusException.
    final Mono<Void> rollback = connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.rollbackTransaction(
            new AmqpTransaction(transactionContext.getTransactionId())));
    return tracer.traceMono("ServiceBus.rollbackTransaction", rollback)
        .onErrorMap(this::mapError);
}
/**
* Disposes of the {@link ServiceBusSenderAsyncClient}. If the client has a dedicated connection, the underlying
* connection is also closed.
*/
@Override
public void close() {
    // getAndSet makes close idempotent: only the first caller runs the clean-up hook.
    final boolean alreadyDisposed = isDisposed.getAndSet(true);
    if (!alreadyDisposed) {
        onClientClose.run();
    }
}
// Recursively drains the iterator into successive batches. 'first' is the message that did not
// fit in the previous batch (or the very first message); when a batch fills up, it is sent and
// the overflowing message becomes 'first' of the next recursion. The END sentinel signals that
// the iterator is exhausted and the final batch has been sent.
private Mono<Void> sendNextIterableBatch(ServiceBusMessage first, Iterator<ServiceBusMessage> messagesItr,
ServiceBusTransactionContext transaction) {
return this.createMessageBatch().flatMap(batch -> {
ServiceBusMessage next = first;
do {
if (!batch.tryAddMessage(next)) {
// A message that does not fit into a FRESH batch can never be sent.
if (next == first) {
return monoError(LOGGER,
new IllegalArgumentException("The message " + first + " is too big to send even in a batch."));
}
// Batch full: send it and hand the overflowing message to the next recursion.
if (transaction != null) {
return this.sendMessages(batch, transaction).then(Mono.just(next));
} else {
return this.sendMessages(batch).then(Mono.just(next));
}
}
if (messagesItr.hasNext()) {
next = messagesItr.next();
} else {
// Iterator exhausted: send the last (partial) batch and signal completion via END.
if (transaction != null) {
return this.sendMessages(batch, transaction).then(Mono.just(END));
} else {
return this.sendMessages(batch).then(Mono.just(END));
}
}
} while (true);
}).flatMap(missed -> {
if (missed == END) {
return Mono.empty();
} else {
// 'missed' overflowed the previous batch; start a new batch with it.
return sendNextIterableBatch(missed, messagesItr, transaction);
}
});
}
// Shared implementation for both scheduleMessage overloads. Validates arguments, resolves the
// link's max message size, then schedules via the management node. transactionContext may be null.
private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime,
ServiceBusTransactionContext transactionContext) {
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessage")));
}
if (Objects.isNull(message)) {
return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
}
if (Objects.isNull(scheduledEnqueueTime)) {
return monoError(LOGGER, new NullPointerException("'scheduledEnqueueTime' cannot be null."));
}
return tracer.traceScheduleMono("ServiceBus.scheduleMessage",
getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> {
// Non-positive link size means "unknown"; fall back to the default maximum.
int maxSize = size > 0
? size
: MAX_MESSAGE_LENGTH_BYTES;
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityName, entityType))
// schedule() returns a Flux of sequence numbers; next() takes the single result.
.flatMap(managementNode -> managementNode.schedule(Arrays.asList(message), scheduledEnqueueTime,
maxSize, link.getLinkName(), transactionContext)
.next());
})),
message, message.getContext())
.onErrorMap(this::mapError);
}
/**
* Sends a message batch to the Azure Service Bus entity this sender is connected to.
* @param batch of messages which allows client to send maximum allowed size for a batch of messages.
* @param transactionContext to be set on batch message before sending to Service Bus.
*
* @return A {@link Mono} the finishes this operation on service bus resource.
*/
private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) {
// Fail fast when the sender has already been closed.
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessages")));
}
if (Objects.isNull(batch)) {
return monoError(LOGGER, new NullPointerException("'batch' cannot be null."));
}
// An empty batch is not an error; it is simply a no-op.
if (batch.getMessages().isEmpty()) {
LOGGER.info("Cannot send an EventBatch that is empty.");
return Mono.empty();
}
LOGGER.atInfo()
.addKeyValue("batchSize", batch.getCount())
.log("Sending batch.");
// Serialize each ServiceBusMessage to a proton-j message, ensuring message annotations exist.
final List<org.apache.qpid.proton.message.Message> messages = Collections.synchronizedList(new ArrayList<>());
batch.getMessages().forEach(serviceBusMessage -> {
final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(serviceBusMessage);
final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
? new MessageAnnotations(new HashMap<>())
: message.getMessageAnnotations();
message.setMessageAnnotations(messageAnnotations);
messages.add(message);
});
final Mono<Void> sendMessage = getSendLink().flatMap(link -> {
// When a transaction is present, attach its id as the delivery's transactional state.
if (transactionContext != null && transactionContext.getTransactionId() != null) {
final TransactionalState deliveryState = new TransactionalState();
deliveryState.setTxnId(Binary.create(transactionContext.getTransactionId()));
// Single-message batches use the single-message overload (avoids batch framing).
return messages.size() == 1
? link.send(messages.get(0), deliveryState)
: link.send(messages, deliveryState);
} else {
return messages.size() == 1
? link.send(messages.get(0))
: link.send(messages);
}
}).onErrorMap(RequestResponseChannelClosedException.class,
e -> {
// Channel closed mid-operation: re-surface as a retriable (transient) AmqpException.
return new AmqpException(true, e.getMessage(), e, null);
});
// Retry per client policy, normalize errors, and wrap with tracing/metrics instrumentation.
final Mono<Void> sendWithRetry = withRetry(sendMessage, retryOptions,
String.format("entityPath[%s], messages-count[%s]: Sending messages timed out.", entityName, batch.getCount()))
.onErrorMap(this::mapError);
return instrumentation.instrumentSendBatch("ServiceBus.send", sendWithRetry, batch.getMessages());
}
// Collects the message stream into link-sized batches (via AmqpMessageCollector) and sends them.
private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) {
if (isDisposed.get()) {
return monoError(LOGGER, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage")));
}
return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName)
.flatMap(link -> link.getLinkSize()
.flatMap(size -> {
// Non-positive link size means "unknown"; fall back to the default maximum.
final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
final CreateMessageBatchOptions batchOptions = new CreateMessageBatchOptions()
.setMaximumSizeInBytes(batchSize);
// maxNumberOfBatches = 1: this path requires everything to fit in a single batch.
return messages.collect(new AmqpMessageCollector(isV2, batchOptions, 1,
link::getErrorContext, tracer, messageSerializer));
})
.flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext)))
.onErrorMap(this::mapError);
}
// Sends each batch in the stream sequentially through sendInternal and completes when all finish.
private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches,
ServiceBusTransactionContext transactionContext) {
return eventBatches
.flatMap(messageBatch -> sendInternal(messageBatch, transactionContext))
.then()
// Errors are logged here but still propagate to the subscriber.
.doOnError(error -> LOGGER.error("Error sending batch.", error));
}
/*
 * Resolves (creating if necessary) the AMQP send link for this sender. When a via-entity is
 * configured, the link is created against the via-entity with a "VIA-" prefixed name and the
 * final destination passed through; otherwise a direct link to the entity is created. The first
 * resolved link's name is recorded in linkName (compareAndSet keeps the first value).
 */
private Mono<AmqpSendLink> getSendLink() {
    return connectionProcessor
        .flatMap(connection -> CoreUtils.isNullOrEmpty(viaEntityName)
            ? connection.createSendLink(entityName, entityName, retryOptions, null, identifier)
            : connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions,
                entityName, identifier))
        .doOnNext(link -> linkName.compareAndSet(null, link.getLinkName()));
}
/*
 * Normalizes arbitrary failures into ServiceBusException with a SEND error source.
 * Errors that are already ServiceBusException pass through unchanged.
 */
private Throwable mapError(Throwable throwable) {
    return throwable instanceof ServiceBusException
        ? throwable
        : new ServiceBusException(throwable, ServiceBusErrorSource.SEND);
}
// Stream Collector that packs ServiceBusMessages into size-limited ServiceBusMessageBatch
// instances. Not a concurrent collector (characteristics() is empty); 'currentBatch' is the
// batch currently being filled and is moved into the result list when it overflows or finishes.
private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>,
List<ServiceBusMessageBatch>> {
private final int maxMessageSize;
// Maximum number of completed batches allowed; null means unlimited.
private final Integer maxNumberOfBatches;
private final ErrorContextProvider contextProvider;
private final ServiceBusTracer tracer;
private final MessageSerializer serializer;
private final boolean isV2;
private volatile ServiceBusMessageBatch currentBatch;
AmqpMessageCollector(boolean isV2, CreateMessageBatchOptions options, Integer maxNumberOfBatches,
ErrorContextProvider contextProvider, ServiceBusTracer tracer, MessageSerializer serializer) {
this.maxNumberOfBatches = maxNumberOfBatches;
// Non-positive option means "unspecified"; fall back to the default maximum.
this.maxMessageSize = options.getMaximumSizeInBytes() > 0
? options.getMaximumSizeInBytes()
: MAX_MESSAGE_LENGTH_BYTES;
this.contextProvider = contextProvider;
this.tracer = tracer;
this.serializer = serializer;
this.isV2 = isV2;
currentBatch = new ServiceBusMessageBatch(isV2, maxMessageSize, contextProvider, tracer, serializer);
}
@Override
public Supplier<List<ServiceBusMessageBatch>> supplier() {
return ArrayList::new;
}
@Override
public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() {
return (list, event) -> {
ServiceBusMessageBatch batch = currentBatch;
if (batch.tryAddMessage(event)) {
return;
}
// Current batch is full. If we are already at the batch-count limit, the stream cannot fit.
if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
final String message = String.format(Locale.US,
"EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches);
throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
contextProvider.getErrorContext());
}
// Start a fresh batch for the overflowing message and archive the full one.
currentBatch = new ServiceBusMessageBatch(isV2, maxMessageSize, contextProvider, tracer, serializer);
// NOTE(review): the return value is ignored here — a single message larger than
// maxMessageSize would be silently dropped rather than rejected. Confirm intended.
currentBatch.tryAddMessage(event);
list.add(batch);
};
}
@Override
public BinaryOperator<List<ServiceBusMessageBatch>> combiner() {
return (existing, another) -> {
existing.addAll(another);
return existing;
};
}
@Override
public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() {
return list -> {
// Flush the in-progress batch (possibly empty of overflow) into the result.
ServiceBusMessageBatch batch = currentBatch;
currentBatch = null;
if (batch != null) {
list.add(batch);
}
return list;
};
}
@Override
public Set<Characteristics> characteristics() {
return Collections.emptySet();
}
}
} |
I was thinking that some of these calls could be pushed to the test runners. Just an idea for one of the many things we can refactor "when we have time" | public void createVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
createVectorStoreWithFileAsyncRunner((storeId, fileId) -> {
StepVerifier.create(client.createVectorStoreFile(storeId, fileId))
.assertNext(vectorStoreFile -> {
assertNotNull(vectorStoreFile);
assertNotNull(vectorStoreFile.getId());
})
.verifyComplete();
deleteVectorStores(client, storeId);
client.deleteFile(fileId).block();
}, client);
} | client.deleteFile(fileId).block(); | public void createVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String storeId = vectorStore.getId();
StepVerifier.create(client.createVectorStoreFile(storeId, fileIds.get(0)))
.assertNext(vectorStoreFile -> {
assertNotNull(vectorStoreFile);
assertNotNull(vectorStoreFile.getId());
})
.verifyComplete();
} | class AzureVectorStoreAsyncTests extends AssistantsClientTestBase {
private AssistantsAsyncClient client;
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
public void createVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Runner supplies the store definition; test asserts a non-null store with an id is returned.
createVectorStoreAsyncRunner(vectorStoreDetails -> {
AtomicReference<String> vectorStoreId = new AtomicReference<>();
StepVerifier.create(client.createVectorStore(vectorStoreDetails))
.assertNext(vectorStore -> {
assertNotNull(vectorStore);
vectorStoreId.set(vectorStore.getId());
assertNotNull(vectorStore.getId());
})
.verifyComplete();
// NOTE(review): unlike sibling tests, the created store is not deleted here — confirm intended.
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void updateVectorStoreName(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Verifies modifyVectorStore echoes back the same id with the updated name, then cleans up.
modifyVectorStoreAsyncRunner((vectorStoreId, vectorStoreDetails) -> {
StepVerifier.create(client.modifyVectorStore(vectorStoreId, vectorStoreDetails))
.assertNext(vectorStore -> {
assertNotNull(vectorStore);
assertEquals(vectorStoreId, vectorStore.getId());
assertEquals(vectorStoreDetails.getName(), vectorStore.getName());
})
.verifyComplete();
deleteVectorStores(client, vectorStoreId);
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void getVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Verifies getVectorStore returns the store matching the requested id, then cleans up.
getVectorStoreAsyncRunner((vectorStoreId) -> {
StepVerifier.create(client.getVectorStore(vectorStoreId))
.assertNext(vectorStore -> {
assertNotNull(vectorStore);
assertEquals(vectorStoreId, vectorStore.getId());
})
.verifyComplete();
deleteVectorStores(client, vectorStoreId);
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void deleteVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Verifies the deletion status reports deleted=true for the requested store id.
deleteVectorStoreAsyncRunner((vectorStoreId) -> {
StepVerifier.create(client.deleteVectorStore(vectorStoreId))
.assertNext(deletionStatus -> {
assertTrue(deletionStatus.isDeleted());
assertEquals(deletionStatus.getId(), vectorStoreId);
})
.verifyComplete();
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void listVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Runner creates two stores; the listing must be non-empty with id/createdAt on every entry.
listVectorStoreAsyncRunner((store1, store2) -> {
StepVerifier.create(client.listVectorStores())
.assertNext(vectorStores -> {
assertNotNull(vectorStores);
assertFalse(vectorStores.getData().isEmpty());
vectorStores.getData().forEach(vectorStore -> {
assertNotNull(vectorStore.getId());
assertNotNull(vectorStore.getCreatedAt());
});
})
.verifyComplete();
// Clean up both stores created by the runner.
deleteVectorStores(client, store1.getId(), store2.getId());
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
public void getVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Fetches a file previously attached to a store and checks the ids round-trip, then cleans up
// both the store and the uploaded file.
getVectorStoreFileAsyncRunner((vectorStoreFile, fileId) -> {
String storeId = vectorStoreFile.getVectorStoreId();
StepVerifier.create(client.getVectorStoreFile(storeId, fileId))
.assertNext(vectorStoreFileResponse -> {
assertNotNull(vectorStoreFileResponse);
assertEquals(fileId, vectorStoreFileResponse.getId());
})
.verifyComplete();
deleteVectorStores(client, storeId);
client.deleteFile(fileId).block();
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
public void listVectorStoreFiles(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Lists files attached to a store and checks id/createdAt on each, then cleans up the store
// and both uploaded files.
listVectorStoreFilesAsyncRunner((vectorStoreFile1, vectorStoreFile2) -> {
String storeId = vectorStoreFile1.getVectorStoreId();
StepVerifier.create(client.listVectorStoreFiles(storeId))
.assertNext(vectorStoreFiles -> {
assertNotNull(vectorStoreFiles);
assertFalse(vectorStoreFiles.getData().isEmpty());
vectorStoreFiles.getData().forEach(vectorStoreFile -> {
assertNotNull(vectorStoreFile.getId());
assertNotNull(vectorStoreFile.getCreatedAt());
});
})
.verifyComplete();
deleteVectorStores(client, storeId);
client.deleteFile(vectorStoreFile1.getId()).block();
client.deleteFile(vectorStoreFile2.getId()).block();
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
public void deleteVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Detaches a file from a store and checks the deletion status, then cleans up the store and
// the underlying uploaded file (detaching does not delete the file itself).
deleteVectorStoreFileAsyncRunner((vectorStoreFile, fileId) -> {
String storeId = vectorStoreFile.getVectorStoreId();
StepVerifier.create(client.deleteVectorStoreFile(storeId, fileId))
.assertNext(deletionStatus -> {
assertTrue(deletionStatus.isDeleted());
assertEquals(deletionStatus.getId(), fileId);
})
.verifyComplete();
deleteVectorStores(client, storeId);
client.deleteFile(fileId).block();
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
public void createVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
// Attaches two files as a batch and asserts the batch reports both, then cleans up the store
// and each uploaded file.
createVectorStoreWithFileBatchAsyncRunner((storeId, batchFiles) -> {
StepVerifier.create(client.createVectorStoreFileBatch(storeId, batchFiles))
.assertNext(vectorStoreFileBatch -> {
assertNotNull(vectorStoreFileBatch);
assertNotNull(vectorStoreFileBatch.getId());
assertEquals(2, vectorStoreFileBatch.getFileCounts().getTotal());
})
.verifyComplete();
deleteVectorStores(client, storeId);
for (String fileId : batchFiles) {
client.deleteFile(fileId).block();
}
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
public void getVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
getVectorStoreFileBatchAsyncRunner(vectorStoreFileBatch -> {
String storeId = vectorStoreFileBatch.getVectorStoreId();
String batchId = vectorStoreFileBatch.getId();
int totalFileCounts = vectorStoreFileBatch.getFileCounts().getTotal();
StepVerifier.create(client.getVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()))
.assertNext(vectorStoreFileBatchResponse -> {
assertNotNull(vectorStoreFileBatchResponse);
assertEquals(storeId, vectorStoreFileBatchResponse.getVectorStoreId());
assertEquals(batchId, vectorStoreFileBatchResponse.getId());
assertEquals(totalFileCounts, vectorStoreFileBatchResponse.getFileCounts().getTotal());
})
.verifyComplete();
deleteVectorStores(client, storeId);
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
public void listVectorStoreFilesBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
listVectorStoreFilesBatchFilesAsyncRunner((storeId, batchId) -> {
List<String> files = new ArrayList<>();
StepVerifier.create(client.listVectorStoreFileBatchFiles(storeId, batchId))
.assertNext(vectorStoreFiles -> {
assertNotNull(vectorStoreFiles);
assertFalse(vectorStoreFiles.getData().isEmpty());
vectorStoreFiles.getData().forEach(vectorStoreFile -> {
String fid = vectorStoreFile.getId();
files.add(fid);
assertNotNull(fid);
assertNotNull(vectorStoreFile.getCreatedAt());
});
})
.verifyComplete();
deleteVectorStores(client, storeId);
for (String fid : files) {
client.deleteFile(fid).block();
}
}, client);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("Azure resource won't able to create a vector store with files")
public void cancelVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
cancelVectorStoreFileBatchAsyncRunner(vectorStore -> {
String storeId = vectorStore.getId();
String fileId = uploadFileAsync(client, "20210203_alphabet_10K.pdf", ASSISTANTS);
String fileId2 = uploadFileAsync(client, "20220924_aapl_10k.pdf", ASSISTANTS);
VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)).block();
StepVerifier.create(client.cancelVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()))
.assertNext(cancelVectorStoreFileBatch -> {
assertNotNull(cancelVectorStoreFileBatch);
assertEquals(vectorStoreFileBatch.getId(), cancelVectorStoreFileBatch.getId());
assertEquals(vectorStoreFileBatch.getFileCounts().getTotal(), cancelVectorStoreFileBatch.getFileCounts().getTotal());
})
.verifyComplete();
deleteVectorStores(client, storeId);
}, client);
}
private void deleteVectorStores(AssistantsAsyncClient client, String... vectorStoreIds) {
if (!CoreUtils.isNullOrEmpty(vectorStoreIds)) {
for (String vectorStoreId : vectorStoreIds) {
client.deleteVectorStore(vectorStoreId).block();
}
}
}
} | class AzureVectorStoreAsyncTests extends AssistantsClientTestBase {
private static final ClientLogger LOGGER = new ClientLogger(AzureVectorStoreAsyncTests.class);
private AssistantsAsyncClient client;
private VectorStore vectorStore;
private List<String> fileIds = new ArrayList<>();
protected void beforeTest(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
client = getAssistantsAsyncClient(httpClient, serviceVersion);
fileIds.add(uploadFileAsync(client, "20210203_alphabet_10K.pdf", ASSISTANTS));
VectorStoreOptions vectorStoreOptions = new VectorStoreOptions()
.setName("Financial Statements")
.setFileIds(fileIds);
StepVerifier.create(client.createVectorStore(vectorStoreOptions))
.assertNext(vectorStore -> {
this.vectorStore = vectorStore;
assertNotNull(vectorStore);
assertNotNull(vectorStore.getId());
})
.verifyComplete();
}
@Override
protected void afterTest() {
LOGGER.info("Cleaning up created resources.");
deleteVectorStoresAsync(client, vectorStore.getId());
deleteFilesAsync(client, fileIds.toArray(new String[0]));
LOGGER.info("Finished cleaning up resources.");
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void updateVectorStoreName(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
modifyVectorStoreRunner(vectorStoreDetails -> {
String vectorStoreId = vectorStore.getId();
StepVerifier.create(client.modifyVectorStore(vectorStoreId, vectorStoreDetails))
.assertNext(vectorStore -> {
assertNotNull(vectorStore);
assertEquals(vectorStoreId, vectorStore.getId());
assertEquals(vectorStoreDetails.getName(), vectorStore.getName());
})
.verifyComplete();
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void getVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String vectorStoreId = vectorStore.getId();
StepVerifier.create(client.getVectorStore(vectorStoreId))
.assertNext(vectorStore -> {
assertNotNull(vectorStore);
assertEquals(vectorStoreId, vectorStore.getId());
})
.verifyComplete();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void listVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
StepVerifier.create(client.listVectorStores())
.assertNext(vectorStores -> {
assertNotNull(vectorStores);
assertFalse(vectorStores.getData().isEmpty());
vectorStores.getData().forEach(vectorStore -> {
assertNotNull(vectorStore.getId());
assertNotNull(vectorStore.getCreatedAt());
});
})
.verifyComplete();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void getVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String storeId = vectorStore.getId();
String fileId = fileIds.get(0);
VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId).block();
StepVerifier.create(client.getVectorStoreFile(storeId, fileId))
.assertNext(vectorStoreFileResponse -> {
assertNotNull(vectorStoreFileResponse);
assertEquals(vectorStoreFile.getVectorStoreId(), vectorStoreFileResponse.getVectorStoreId());
assertEquals(vectorStoreFile.getId(), vectorStoreFileResponse.getId());
assertEquals(fileId, vectorStoreFileResponse.getId());
})
.verifyComplete();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void listVectorStoreFiles(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String storeId = vectorStore.getId();
String fileId = fileIds.get(0);
String fileId2 = uploadFileAsync(client, "20220924_aapl_10k.pdf", ASSISTANTS);
fileIds.add(fileId2);
VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId).block();
VectorStoreFile vectorStoreFile2 = client.createVectorStoreFile(storeId, fileId2).block();
assertEquals(fileId, vectorStoreFile.getId());
assertEquals(fileId2, vectorStoreFile2.getId());
StepVerifier.create(client.listVectorStoreFiles(storeId))
.assertNext(vectorStoreFiles -> {
assertNotNull(vectorStoreFiles);
assertFalse(vectorStoreFiles.getData().isEmpty());
vectorStoreFiles.getData().forEach(storeFile -> {
assertNotNull(storeFile.getId());
assertNotNull(storeFile.getCreatedAt());
});
})
.verifyComplete();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void deleteVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String storeId = vectorStore.getId();
String fileId = fileIds.get(0);
StepVerifier.create(client.deleteVectorStoreFile(storeId, fileId))
.assertNext(deletionStatus -> {
assertTrue(deletionStatus.isDeleted());
assertEquals(fileId, deletionStatus.getId());
})
.verifyComplete();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void createVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String storeId = vectorStore.getId();
String fileId = fileIds.get(0);
String fileId2 = uploadFileAsync(client, "20220924_aapl_10k.pdf", ASSISTANTS);
fileIds.add(fileId2);
StepVerifier.create(client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)))
.assertNext(vectorStoreFileBatch -> {
assertNotNull(vectorStoreFileBatch);
assertNotNull(vectorStoreFileBatch.getId());
assertEquals(2, vectorStoreFileBatch.getFileCounts().getTotal());
})
.verifyComplete();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void getVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String storeId = vectorStore.getId();
String fileId = fileIds.get(0);
String fileId2 = uploadFileAsync(client, "20220924_aapl_10k.pdf", ASSISTANTS);
fileIds.add(fileId2);
VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)).block();
String batchId = vectorStoreFileBatch.getId();
int totalFileCounts = vectorStoreFileBatch.getFileCounts().getTotal();
StepVerifier.create(client.getVectorStoreFileBatch(storeId, batchId))
.assertNext(vectorStoreFileBatchResponse -> {
assertNotNull(vectorStoreFileBatchResponse);
assertEquals(storeId, vectorStoreFileBatchResponse.getVectorStoreId());
assertEquals(batchId, vectorStoreFileBatchResponse.getId());
assertEquals(totalFileCounts, vectorStoreFileBatchResponse.getFileCounts().getTotal());
})
.verifyComplete();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
@Disabled("This test is failing with 500. The server had an error processing your request. Sorry about that! "
+ "You can retry your request, or contact us through our help center at oai-assistants@microsoft.com if "
+ "you keep seeing this error.")
public void listVectorStoreFilesBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String storeId = vectorStore.getId();
String fileId = fileIds.get(0);
String fileId2 = uploadFileAsync(client, "20220924_aapl_10k.pdf", ASSISTANTS);
fileIds.add(fileId2);
VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)).block();
while (vectorStoreFileBatch.getStatus() == VectorStoreFileBatchStatus.IN_PROGRESS) {
vectorStoreFileBatch = client.getVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()).block();
}
StepVerifier.create(client.listVectorStoreFileBatchFiles(storeId, vectorStoreFileBatch.getId()))
.assertNext(vectorStoreFiles -> {
assertNotNull(vectorStoreFiles);
assertFalse(vectorStoreFiles.getData().isEmpty());
vectorStoreFiles.getData().forEach(vectorStoreFile -> {
String fid = vectorStoreFile.getId();
assertNotNull(fid);
assertNotNull(vectorStoreFile.getCreatedAt());
});
})
.verifyComplete();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.openai.assistants.TestUtils
public void cancelVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) {
beforeTest(httpClient, serviceVersion);
String storeId = vectorStore.getId();
String fileId = fileIds.get(0);
String fileId2 = uploadFileAsync(client, "20220924_aapl_10k.pdf", ASSISTANTS);
fileIds.add(fileId2);
VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)).block();
while (vectorStoreFileBatch.getStatus() == VectorStoreFileBatchStatus.IN_PROGRESS) {
vectorStoreFileBatch = client.getVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()).block();
}
VectorStoreFileBatch finalVectorStoreFileBatch = vectorStoreFileBatch;
StepVerifier.create(client.cancelVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()))
.assertNext(cancelVectorStoreFileBatch -> {
assertNotNull(cancelVectorStoreFileBatch);
assertEquals(finalVectorStoreFileBatch.getId(), cancelVectorStoreFileBatch.getId());
assertEquals(finalVectorStoreFileBatch.getFileCounts().getTotal(), cancelVectorStoreFileBatch.getFileCounts().getTotal());
})
.verifyComplete();
}
} |
do you think it's worth changing `native` -> `graal` or do we prefer to group (any hypothetical future) native runtimes together? | private static String getJavaRuntime() {
if(isGraalVmNative()) {
return "!native";
}
return "";
} | return "!native"; | private static String getJavaRuntime() {
if(isGraalVmNative()) {
return "!native";
}
return "";
} | class VersionGenerator {
private static final String UNKNOWN_VERSION_VALUE = "unknown";
private static final String artifactName;
private static final String artifactVersion;
private static final String sdkVersionString;
static {
Map<String, String> properties =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
artifactName = properties.get("name");
artifactVersion = properties.get("version");
sdkVersionString =
"java"
+ getJavaVersion()
+ getJavaRuntime()
+ ":"
+ "otel"
+ getOpenTelemetryApiVersion()
+ ":"
+ "ext"
+ artifactVersion;
}
/**
* This method returns artifact name.
*
* @return artifactName.
*/
public static String getArtifactName() {
return artifactName;
}
/**
* This method returns artifact version.
*
* @return artifactVersion.
*/
public static String getArtifactVersion() {
return artifactVersion;
}
/**
* This method returns sdk version string as per the below format javaX:otelY:extZ X = Java
* version, Y = opentelemetry version, Z = exporter version
*
* @return sdkVersionString.
*/
public static String getSdkVersion() {
return sdkVersionString;
}
private static String getJavaVersion() {
return System.getProperty("java.version");
}
private static boolean isGraalVmNative() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static String getOpenTelemetryApiVersion() {
Map<String, String> properties =
CoreUtils.getProperties("io/opentelemetry/api/version.properties");
if (properties == null) {
return UNKNOWN_VERSION_VALUE;
}
String version = properties.get("sdk.version");
return version != null ? version : UNKNOWN_VERSION_VALUE;
}
private VersionGenerator() {
}
} | class VersionGenerator {
private static final String UNKNOWN_VERSION_VALUE = "unknown";
private static final String artifactName;
private static final String artifactVersion;
private static final String sdkVersionString;
static {
Map<String, String> properties =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
artifactName = properties.get("name");
artifactVersion = properties.get("version");
sdkVersionString =
"java"
+ getJavaVersion()
+ getJavaRuntime()
+ ":"
+ "otel"
+ getOpenTelemetryApiVersion()
+ ":"
+ "ext"
+ artifactVersion;
}
/**
* This method returns artifact name.
*
* @return artifactName.
*/
public static String getArtifactName() {
return artifactName;
}
/**
* This method returns artifact version.
*
* @return artifactVersion.
*/
public static String getArtifactVersion() {
return artifactVersion;
}
/**
* This method returns sdk version string as per the below format javaX:otelY:extZ X = Java
* version, Y = opentelemetry version, Z = exporter version
*
* @return sdkVersionString.
*/
public static String getSdkVersion() {
return sdkVersionString;
}
private static String getJavaVersion() {
return System.getProperty("java.version");
}
private static boolean isGraalVmNative() {
String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode");
return imageCode != null;
}
private static String getOpenTelemetryApiVersion() {
Map<String, String> properties =
CoreUtils.getProperties("io/opentelemetry/api/version.properties");
if (properties == null) {
return UNKNOWN_VERSION_VALUE;
}
String version = properties.get("sdk.version");
return version != null ? version : UNKNOWN_VERSION_VALUE;
}
private VersionGenerator() {
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.